linux/drivers/net/ethernet/dlink/sundance.c
<<
>>
Prefs
   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3        Written 1999-2000 by Donald Becker.
   4
   5        This software may be used and distributed according to the terms of
   6        the GNU General Public License (GPL), incorporated herein by reference.
   7        Drivers based on or derived from this code fall under the GPL and must
   8        retain the authorship, copyright and license notice.  This file is not
   9        a complete program and may only be used when the entire operating
  10        system is licensed under the GPL.
  11
  12        The author may be reached as becker@scyld.com, or C/O
  13        Scyld Computing Corporation
  14        410 Severn Ave., Suite 210
  15        Annapolis MD 21403
  16
  17        Support and updates available at
  18        http://www.scyld.com/network/sundance.html
  19        [link no longer provides useful info -jgarzik]
  20        Archives of the mailing list are still available at
  21        http://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME        "sundance"
  26#define DRV_VERSION     "1.2"
  27#define DRV_RELDATE     "11-Sep-2006"
  28
  29
  30/* The user-configurable values.
  31   These may be modified when a driver module is loaded.*/
  32static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
  33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  34   Typical is a 64 element hash table based on the Ethernet CRC.  */
  35static const int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  38   Setting to > 1518 effectively disables this feature.
  39   This chip can receive into offset buffers, so the Alpha does not
  40   need a copy-align. */
  41static int rx_copybreak;
  42static int flowctrl=1;
  43
  44/* media[] specifies the media type the NIC operates at.
  45                 autosense      Autosensing active media.
  46                 10mbps_hd      10Mbps half duplex.
  47                 10mbps_fd      10Mbps full duplex.
  48                 100mbps_hd     100Mbps half duplex.
  49                 100mbps_fd     100Mbps full duplex.
  50                 0              Autosensing active media.
  51                 1              10Mbps half duplex.
  52                 2              10Mbps full duplex.
  53                 3              100Mbps half duplex.
  54                 4              100Mbps full duplex.
  55*/
  56#define MAX_UNITS 8
  57static char *media[MAX_UNITS];
  58
  59
  60/* Operational parameters that are set at compile time. */
  61
  62/* Keep the ring sizes a power of two for compile efficiency.
  63   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  64   Making the Tx ring too large decreases the effectiveness of channel
  65   bonding and packet priority, and more than 128 requires modifying the
  66   Tx error recovery.
  67   Large receive rings merely waste memory. */
  68#define TX_RING_SIZE    32
  69#define TX_QUEUE_LEN    (TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  70#define RX_RING_SIZE    64
  71#define RX_BUDGET       32
  72#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct netdev_desc)
  73#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct netdev_desc)
  74
  75/* Operational parameters that usually are not changed. */
  76/* Time in jiffies before concluding the transmitter is hung. */
  77#define TX_TIMEOUT  (4*HZ)
  78#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
  79
  80/* Include files, designed to support most kernel versions 2.0.0 and later. */
  81#include <linux/module.h>
  82#include <linux/kernel.h>
  83#include <linux/string.h>
  84#include <linux/timer.h>
  85#include <linux/errno.h>
  86#include <linux/ioport.h>
  87#include <linux/interrupt.h>
  88#include <linux/pci.h>
  89#include <linux/netdevice.h>
  90#include <linux/etherdevice.h>
  91#include <linux/skbuff.h>
  92#include <linux/init.h>
  93#include <linux/bitops.h>
  94#include <linux/uaccess.h>
  95#include <asm/processor.h>              /* Processor type for cache alignment. */
  96#include <asm/io.h>
  97#include <linux/delay.h>
  98#include <linux/spinlock.h>
  99#include <linux/dma-mapping.h>
 100#include <linux/crc32.h>
 101#include <linux/ethtool.h>
 102#include <linux/mii.h>
 103
 104/* These identify the driver base version and may not be removed. */
 105static const char version[] =
 106        KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
 107        " Written by Donald Becker\n";
 108
 109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 111MODULE_LICENSE("GPL");
 112
 113module_param(debug, int, 0);
 114module_param(rx_copybreak, int, 0);
 115module_param_array(media, charp, NULL, 0);
 116module_param(flowctrl, int, 0);
 117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 120
 121/*
 122                                Theory of Operation
 123
 124I. Board Compatibility
 125
 126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 127
 128II. Board-specific settings
 129
 130III. Driver operation
 131
 132IIIa. Ring buffers
 133
 134This driver uses two statically allocated fixed-size descriptor lists
 135formed into rings by a branch from the final descriptor to the beginning of
 136the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 137Some chips explicitly use only 2^N sized rings, while others use a
 138'next descriptor' pointer that the driver forms into rings.
 139
 140IIIb/c. Transmit/Receive Structure
 141
 142This driver uses a zero-copy receive and transmit scheme.
 143The driver allocates full frame size skbuffs for the Rx ring buffers at
 144open() time and passes the skb->data field to the chip as receive data
 145buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 146a fresh skbuff is allocated and the frame is copied to the new skbuff.
 147When the incoming frame is larger, the skbuff is passed directly up the
 148protocol stack.  Buffers consumed this way are replaced by newly allocated
 149skbuffs in a later phase of receives.
 150
 151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 152using a full-sized skbuff for small frames vs. the copying costs of larger
 153frames.  New boards are typically used in generously configured machines
 154and the underfilled buffers have negligible impact compared to the benefit of
 155a single allocation size, so the default value of zero results in never
 156copying packets.  When copying is done, the cost is usually mitigated by using
 157a combined copy/checksum routine.  Copying also preloads the cache, which is
 158most useful with small frames.
 159
 160A subtle aspect of the operation is that the IP header at offset 14 in an
 161ethernet frame isn't longword aligned for further processing.
 162Unaligned buffers are permitted by the Sundance hardware, so
 163frames are received into the skbuff at an offset of "+2", 16-byte aligning
 164the IP header.
 165
 166IIId. Synchronization
 167
 168The driver runs as two independent, single-threaded flows of control.  One
 169is the send-packet routine, which enforces single-threaded use by the
 170dev->tbusy flag.  The other thread is the interrupt handler, which is single
 171threaded by the hardware and interrupt handling software.
 172
 173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 174flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 176the 'lp->tx_full' flag.
 177
 178The interrupt handler has exclusive control over the Rx ring and records stats
 179from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 181clears both the tx_full and tbusy flags.
 182
 183IV. Notes
 184
 185IVb. References
 186
 187The Sundance ST201 datasheet, preliminary version.
 188The Kendin KS8723 datasheet, preliminary version.
 189The ICplus IP100 datasheet, preliminary version.
 190http://www.scyld.com/expert/100mbps.html
 191http://www.scyld.com/expert/NWay.html
 192
 193IVc. Errata
 194
 195*/
 196
 197/* Work-around for Kendin chip bugs. */
 198#ifndef CONFIG_SUNDANCE_MMIO
 199#define USE_IO_OPS 1
 200#endif
 201
 202static const struct pci_device_id sundance_pci_tbl[] = {
 203        { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 204        { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 205        { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 206        { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 207        { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 208        { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 209        { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 210        { }
 211};
 212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 213
 214enum {
 215        netdev_io_size = 128
 216};
 217
 218struct pci_id_info {
 219        const char *name;
 220};
 221static const struct pci_id_info pci_id_tbl[] = {
 222        {"D-Link DFE-550TX FAST Ethernet Adapter"},
 223        {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 224        {"D-Link DFE-580TX 4 port Server Adapter"},
 225        {"D-Link DFE-530TXS FAST Ethernet Adapter"},
 226        {"D-Link DL10050-based FAST Ethernet Adapter"},
 227        {"Sundance Technology Alta"},
 228        {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229        { }     /* terminate list. */
 230};
 231
 232/* This driver was written to use PCI memory space, however x86-oriented
 233   hardware often uses I/O space accesses. */
 234
 235/* Offsets to the device registers.
 236   Unlike software-only systems, device drivers interact with complex hardware.
 237   It's not useful to define symbolic names for every register bit in the
 238   device.  The name can only partially document the semantics and make
 239   the driver longer and more difficult to read.
 240   In general, only the important configuration values or bits changed
 241   multiple times should be defined symbolically.
 242*/
 243enum alta_offsets {
 244        DMACtrl = 0x00,
 245        TxListPtr = 0x04,
 246        TxDMABurstThresh = 0x08,
 247        TxDMAUrgentThresh = 0x09,
 248        TxDMAPollPeriod = 0x0a,
 249        RxDMAStatus = 0x0c,
 250        RxListPtr = 0x10,
 251        DebugCtrl0 = 0x1a,
 252        DebugCtrl1 = 0x1c,
 253        RxDMABurstThresh = 0x14,
 254        RxDMAUrgentThresh = 0x15,
 255        RxDMAPollPeriod = 0x16,
 256        LEDCtrl = 0x1a,
 257        ASICCtrl = 0x30,
 258        EEData = 0x34,
 259        EECtrl = 0x36,
 260        FlashAddr = 0x40,
 261        FlashData = 0x44,
 262        WakeEvent = 0x45,
 263        TxStatus = 0x46,
 264        TxFrameId = 0x47,
 265        DownCounter = 0x18,
 266        IntrClear = 0x4a,
 267        IntrEnable = 0x4c,
 268        IntrStatus = 0x4e,
 269        MACCtrl0 = 0x50,
 270        MACCtrl1 = 0x52,
 271        StationAddr = 0x54,
 272        MaxFrameSize = 0x5A,
 273        RxMode = 0x5c,
 274        MIICtrl = 0x5e,
 275        MulticastFilter0 = 0x60,
 276        MulticastFilter1 = 0x64,
 277        RxOctetsLow = 0x68,
 278        RxOctetsHigh = 0x6a,
 279        TxOctetsLow = 0x6c,
 280        TxOctetsHigh = 0x6e,
 281        TxFramesOK = 0x70,
 282        RxFramesOK = 0x72,
 283        StatsCarrierError = 0x74,
 284        StatsLateColl = 0x75,
 285        StatsMultiColl = 0x76,
 286        StatsOneColl = 0x77,
 287        StatsTxDefer = 0x78,
 288        RxMissed = 0x79,
 289        StatsTxXSDefer = 0x7a,
 290        StatsTxAbort = 0x7b,
 291        StatsBcastTx = 0x7c,
 292        StatsBcastRx = 0x7d,
 293        StatsMcastTx = 0x7e,
 294        StatsMcastRx = 0x7f,
 295        /* Aliased and bogus values! */
 296        RxStatus = 0x0c,
 297};
 298
 299#define ASIC_HI_WORD(x) ((x) + 2)
 300
 301enum ASICCtrl_HiWord_bit {
 302        GlobalReset = 0x0001,
 303        RxReset = 0x0002,
 304        TxReset = 0x0004,
 305        DMAReset = 0x0008,
 306        FIFOReset = 0x0010,
 307        NetworkReset = 0x0020,
 308        HostReset = 0x0040,
 309        ResetBusy = 0x0400,
 310};
 311
 312/* Bits in the interrupt status/mask registers. */
 313enum intr_status_bits {
 314        IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 315        IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 316        IntrDrvRqst=0x0040,
 317        StatsMax=0x0080, LinkChange=0x0100,
 318        IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 319};
 320
 321/* Bits in the RxMode register. */
 322enum rx_mode_bits {
 323        AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 324        AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 325};
 326/* Bits in MACCtrl. */
 327enum mac_ctrl0_bits {
 328        EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 329        EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 330};
 331enum mac_ctrl1_bits {
 332        StatsEnable=0x0020,     StatsDisable=0x0040, StatsEnabled=0x0080,
 333        TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 334        RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 335};
 336
 337/* Bits in WakeEvent register. */
 338enum wake_event_bits {
 339        WakePktEnable = 0x01,
 340        MagicPktEnable = 0x02,
 341        LinkEventEnable = 0x04,
 342        WolEnable = 0x80,
 343};
 344
 345/* The Rx and Tx buffer descriptors. */
 346/* Note that using only 32 bit fields simplifies conversion to big-endian
 347   architectures. */
 348struct netdev_desc {
 349        __le32 next_desc;
 350        __le32 status;
 351        struct desc_frag { __le32 addr, length; } frag[1];
 352};
 353
 354/* Bits in netdev_desc.status */
 355enum desc_status_bits {
 356        DescOwn=0x8000,
 357        DescEndPacket=0x4000,
 358        DescEndRing=0x2000,
 359        LastFrag=0x80000000,
 360        DescIntrOnTx=0x8000,
 361        DescIntrOnDMADone=0x80000000,
 362        DisableAlign = 0x00000001,
 363};
 364
 365#define PRIV_ALIGN      15      /* Required alignment mask */
 366/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 367   within the structure. */
 368#define MII_CNT         4
 369struct netdev_private {
 370        /* Descriptor rings first for alignment. */
 371        struct netdev_desc *rx_ring;
 372        struct netdev_desc *tx_ring;
 373        struct sk_buff* rx_skbuff[RX_RING_SIZE];
 374        struct sk_buff* tx_skbuff[TX_RING_SIZE];
 375        dma_addr_t tx_ring_dma;
 376        dma_addr_t rx_ring_dma;
 377        struct timer_list timer;                /* Media monitoring timer. */
 378        /* ethtool extra stats */
 379        struct {
 380                u64 tx_multiple_collisions;
 381                u64 tx_single_collisions;
 382                u64 tx_late_collisions;
 383                u64 tx_deferred;
 384                u64 tx_deferred_excessive;
 385                u64 tx_aborted;
 386                u64 tx_bcasts;
 387                u64 rx_bcasts;
 388                u64 tx_mcasts;
 389                u64 rx_mcasts;
 390        } xstats;
 391        /* Frequently used values: keep some adjacent for cache effect. */
 392        spinlock_t lock;
 393        int msg_enable;
 394        int chip_id;
 395        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
 396        unsigned int rx_buf_sz;                 /* Based on MTU+slack. */
 397        struct netdev_desc *last_tx;            /* Last Tx descriptor used. */
 398        unsigned int cur_tx, dirty_tx;
 399        /* These values are keep track of the transceiver/media in use. */
 400        unsigned int flowctrl:1;
 401        unsigned int default_port:4;            /* Last dev->if_port value. */
 402        unsigned int an_enable:1;
 403        unsigned int speed;
 404        unsigned int wol_enabled:1;                     /* Wake on LAN enabled */
 405        struct tasklet_struct rx_tasklet;
 406        struct tasklet_struct tx_tasklet;
 407        int budget;
 408        int cur_task;
 409        /* Multicast and receive mode. */
 410        spinlock_t mcastlock;                   /* SMP lock multicast updates. */
 411        u16 mcast_filter[4];
 412        /* MII transceiver section. */
 413        struct mii_if_info mii_if;
 414        int mii_preamble_required;
 415        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used. */
 416        struct pci_dev *pci_dev;
 417        void __iomem *base;
 418        spinlock_t statlock;
 419};
 420
 421/* The station address location in the EEPROM. */
 422#define EEPROM_SA_OFFSET        0x10
 423#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 424                        IntrDrvRqst | IntrTxDone | StatsMax | \
 425                        LinkChange)
 426
 427static int  change_mtu(struct net_device *dev, int new_mtu);
 428static int  eeprom_read(void __iomem *ioaddr, int location);
 429static int  mdio_read(struct net_device *dev, int phy_id, int location);
 430static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 431static int  mdio_wait_link(struct net_device *dev, int wait);
 432static int  netdev_open(struct net_device *dev);
 433static void check_duplex(struct net_device *dev);
 434static void netdev_timer(struct timer_list *t);
 435static void tx_timeout(struct net_device *dev);
 436static void init_ring(struct net_device *dev);
 437static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 438static int reset_tx (struct net_device *dev);
 439static irqreturn_t intr_handler(int irq, void *dev_instance);
 440static void rx_poll(unsigned long data);
 441static void tx_poll(unsigned long data);
 442static void refill_rx (struct net_device *dev);
 443static void netdev_error(struct net_device *dev, int intr_status);
 444static void netdev_error(struct net_device *dev, int intr_status);
 445static void set_rx_mode(struct net_device *dev);
 446static int __set_mac_addr(struct net_device *dev);
 447static int sundance_set_mac_addr(struct net_device *dev, void *data);
 448static struct net_device_stats *get_stats(struct net_device *dev);
 449static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 450static int  netdev_close(struct net_device *dev);
 451static const struct ethtool_ops ethtool_ops;
 452
 453static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 454{
 455        struct netdev_private *np = netdev_priv(dev);
 456        void __iomem *ioaddr = np->base + ASICCtrl;
 457        int countdown;
 458
 459        /* ST201 documentation states ASICCtrl is a 32bit register */
 460        iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 461        /* ST201 documentation states reset can take up to 1 ms */
 462        countdown = 10 + 1;
 463        while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 464                if (--countdown == 0) {
 465                        printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 466                        break;
 467                }
 468                udelay(100);
 469        }
 470}
 471
 472#ifdef CONFIG_NET_POLL_CONTROLLER
 473static void sundance_poll_controller(struct net_device *dev)
 474{
 475        struct netdev_private *np = netdev_priv(dev);
 476
 477        disable_irq(np->pci_dev->irq);
 478        intr_handler(np->pci_dev->irq, dev);
 479        enable_irq(np->pci_dev->irq);
 480}
 481#endif
 482
 483static const struct net_device_ops netdev_ops = {
 484        .ndo_open               = netdev_open,
 485        .ndo_stop               = netdev_close,
 486        .ndo_start_xmit         = start_tx,
 487        .ndo_get_stats          = get_stats,
 488        .ndo_set_rx_mode        = set_rx_mode,
 489        .ndo_do_ioctl           = netdev_ioctl,
 490        .ndo_tx_timeout         = tx_timeout,
 491        .ndo_change_mtu         = change_mtu,
 492        .ndo_set_mac_address    = sundance_set_mac_addr,
 493        .ndo_validate_addr      = eth_validate_addr,
 494#ifdef CONFIG_NET_POLL_CONTROLLER
 495        .ndo_poll_controller    = sundance_poll_controller,
 496#endif
 497};
 498
 499static int sundance_probe1(struct pci_dev *pdev,
 500                           const struct pci_device_id *ent)
 501{
 502        struct net_device *dev;
 503        struct netdev_private *np;
 504        static int card_idx;
 505        int chip_idx = ent->driver_data;
 506        int irq;
 507        int i;
 508        void __iomem *ioaddr;
 509        u16 mii_ctl;
 510        void *ring_space;
 511        dma_addr_t ring_dma;
 512#ifdef USE_IO_OPS
 513        int bar = 0;
 514#else
 515        int bar = 1;
 516#endif
 517        int phy, phy_end, phy_idx = 0;
 518
 519/* when built into the kernel, we only print version if device is found */
 520#ifndef MODULE
 521        static int printed_version;
 522        if (!printed_version++)
 523                printk(version);
 524#endif
 525
 526        if (pci_enable_device(pdev))
 527                return -EIO;
 528        pci_set_master(pdev);
 529
 530        irq = pdev->irq;
 531
 532        dev = alloc_etherdev(sizeof(*np));
 533        if (!dev)
 534                return -ENOMEM;
 535        SET_NETDEV_DEV(dev, &pdev->dev);
 536
 537        if (pci_request_regions(pdev, DRV_NAME))
 538                goto err_out_netdev;
 539
 540        ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 541        if (!ioaddr)
 542                goto err_out_res;
 543
 544        for (i = 0; i < 3; i++)
 545                ((__le16 *)dev->dev_addr)[i] =
 546                        cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 547
 548        np = netdev_priv(dev);
 549        np->base = ioaddr;
 550        np->pci_dev = pdev;
 551        np->chip_id = chip_idx;
 552        np->msg_enable = (1 << debug) - 1;
 553        spin_lock_init(&np->lock);
 554        spin_lock_init(&np->statlock);
 555        tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 556        tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 557
 558        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 559                        &ring_dma, GFP_KERNEL);
 560        if (!ring_space)
 561                goto err_out_cleardev;
 562        np->tx_ring = (struct netdev_desc *)ring_space;
 563        np->tx_ring_dma = ring_dma;
 564
 565        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 566                        &ring_dma, GFP_KERNEL);
 567        if (!ring_space)
 568                goto err_out_unmap_tx;
 569        np->rx_ring = (struct netdev_desc *)ring_space;
 570        np->rx_ring_dma = ring_dma;
 571
 572        np->mii_if.dev = dev;
 573        np->mii_if.mdio_read = mdio_read;
 574        np->mii_if.mdio_write = mdio_write;
 575        np->mii_if.phy_id_mask = 0x1f;
 576        np->mii_if.reg_num_mask = 0x1f;
 577
 578        /* The chip-specific entries in the device structure. */
 579        dev->netdev_ops = &netdev_ops;
 580        dev->ethtool_ops = &ethtool_ops;
 581        dev->watchdog_timeo = TX_TIMEOUT;
 582
 583        /* MTU range: 68 - 8191 */
 584        dev->min_mtu = ETH_MIN_MTU;
 585        dev->max_mtu = 8191;
 586
 587        pci_set_drvdata(pdev, dev);
 588
 589        i = register_netdev(dev);
 590        if (i)
 591                goto err_out_unmap_rx;
 592
 593        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 594               dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 595               dev->dev_addr, irq);
 596
 597        np->phys[0] = 1;                /* Default setting */
 598        np->mii_preamble_required++;
 599
 600        /*
 601         * It seems some phys doesn't deal well with address 0 being accessed
 602         * first
 603         */
 604        if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 605                phy = 0;
 606                phy_end = 31;
 607        } else {
 608                phy = 1;
 609                phy_end = 32;   /* wraps to zero, due to 'phy & 0x1f' */
 610        }
 611        for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 612                int phyx = phy & 0x1f;
 613                int mii_status = mdio_read(dev, phyx, MII_BMSR);
 614                if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 615                        np->phys[phy_idx++] = phyx;
 616                        np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 617                        if ((mii_status & 0x0040) == 0)
 618                                np->mii_preamble_required++;
 619                        printk(KERN_INFO "%s: MII PHY found at address %d, status "
 620                                   "0x%4.4x advertising %4.4x.\n",
 621                                   dev->name, phyx, mii_status, np->mii_if.advertising);
 622                }
 623        }
 624        np->mii_preamble_required--;
 625
 626        if (phy_idx == 0) {
 627                printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 628                           dev->name, ioread32(ioaddr + ASICCtrl));
 629                goto err_out_unregister;
 630        }
 631
 632        np->mii_if.phy_id = np->phys[0];
 633
 634        /* Parse override configuration */
 635        np->an_enable = 1;
 636        if (card_idx < MAX_UNITS) {
 637                if (media[card_idx] != NULL) {
 638                        np->an_enable = 0;
 639                        if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 640                            strcmp (media[card_idx], "4") == 0) {
 641                                np->speed = 100;
 642                                np->mii_if.full_duplex = 1;
 643                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 644                                   strcmp (media[card_idx], "3") == 0) {
 645                                np->speed = 100;
 646                                np->mii_if.full_duplex = 0;
 647                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 648                                   strcmp (media[card_idx], "2") == 0) {
 649                                np->speed = 10;
 650                                np->mii_if.full_duplex = 1;
 651                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 652                                   strcmp (media[card_idx], "1") == 0) {
 653                                np->speed = 10;
 654                                np->mii_if.full_duplex = 0;
 655                        } else {
 656                                np->an_enable = 1;
 657                        }
 658                }
 659                if (flowctrl == 1)
 660                        np->flowctrl = 1;
 661        }
 662
 663        /* Fibre PHY? */
 664        if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 665                /* Default 100Mbps Full */
 666                if (np->an_enable) {
 667                        np->speed = 100;
 668                        np->mii_if.full_duplex = 1;
 669                        np->an_enable = 0;
 670                }
 671        }
 672        /* Reset PHY */
 673        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 674        mdelay (300);
 675        /* If flow control enabled, we need to advertise it.*/
 676        if (np->flowctrl)
 677                mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 678        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 679        /* Force media type */
 680        if (!np->an_enable) {
 681                mii_ctl = 0;
 682                mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 683                mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 684                mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 685                printk (KERN_INFO "Override speed=%d, %s duplex\n",
 686                        np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 687
 688        }
 689
 690        /* Perhaps move the reset here? */
 691        /* Reset the chip to erase previous misconfiguration. */
 692        if (netif_msg_hw(np))
 693                printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 694        sundance_reset(dev, 0x00ff << 16);
 695        if (netif_msg_hw(np))
 696                printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 697
 698        card_idx++;
 699        return 0;
 700
 701err_out_unregister:
 702        unregister_netdev(dev);
 703err_out_unmap_rx:
 704        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 705                np->rx_ring, np->rx_ring_dma);
 706err_out_unmap_tx:
 707        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 708                np->tx_ring, np->tx_ring_dma);
 709err_out_cleardev:
 710        pci_iounmap(pdev, ioaddr);
 711err_out_res:
 712        pci_release_regions(pdev);
 713err_out_netdev:
 714        free_netdev (dev);
 715        return -ENODEV;
 716}
 717
 718static int change_mtu(struct net_device *dev, int new_mtu)
 719{
 720        if (netif_running(dev))
 721                return -EBUSY;
 722        dev->mtu = new_mtu;
 723        return 0;
 724}
 725
 726#define eeprom_delay(ee_addr)   ioread32(ee_addr)
 727/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 728static int eeprom_read(void __iomem *ioaddr, int location)
 729{
 730        int boguscnt = 10000;           /* Typical 1900 ticks. */
 731        iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 732        do {
 733                eeprom_delay(ioaddr + EECtrl);
 734                if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 735                        return ioread16(ioaddr + EEData);
 736                }
 737        } while (--boguscnt > 0);
 738        return 0;
 739}
 740
 741/*  MII transceiver control section.
 742        Read and write the MII registers using software-generated serial
 743        MDIO protocol.  See the MII specifications or DP83840A data sheet
 744        for details.
 745
 746        The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
 747        met by back-to-back 33Mhz PCI cycles. */
 748#define mdio_delay() ioread8(mdio_addr)
 749
 750enum mii_reg_bits {
 751        MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 752};
 753#define MDIO_EnbIn  (0)
 754#define MDIO_WRITE0 (MDIO_EnbOutput)
 755#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 756
 757/* Generate the preamble required for initial synchronization and
 758   a few older transceivers. */
 759static void mdio_sync(void __iomem *mdio_addr)
 760{
 761        int bits = 32;
 762
 763        /* Establish sync by sending at least 32 logic ones. */
 764        while (--bits >= 0) {
 765                iowrite8(MDIO_WRITE1, mdio_addr);
 766                mdio_delay();
 767                iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 768                mdio_delay();
 769        }
 770}
 771
 772static int mdio_read(struct net_device *dev, int phy_id, int location)
 773{
 774        struct netdev_private *np = netdev_priv(dev);
 775        void __iomem *mdio_addr = np->base + MIICtrl;
 776        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 777        int i, retval = 0;
 778
 779        if (np->mii_preamble_required)
 780                mdio_sync(mdio_addr);
 781
 782        /* Shift the read command bits out. */
 783        for (i = 15; i >= 0; i--) {
 784                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 785
 786                iowrite8(dataval, mdio_addr);
 787                mdio_delay();
 788                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 789                mdio_delay();
 790        }
 791        /* Read the two transition, 16 data, and wire-idle bits. */
 792        for (i = 19; i > 0; i--) {
 793                iowrite8(MDIO_EnbIn, mdio_addr);
 794                mdio_delay();
 795                retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 796                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 797                mdio_delay();
 798        }
 799        return (retval>>1) & 0xffff;
 800}
 801
 802static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 803{
 804        struct netdev_private *np = netdev_priv(dev);
 805        void __iomem *mdio_addr = np->base + MIICtrl;
 806        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 807        int i;
 808
 809        if (np->mii_preamble_required)
 810                mdio_sync(mdio_addr);
 811
 812        /* Shift the command bits out. */
 813        for (i = 31; i >= 0; i--) {
 814                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 815
 816                iowrite8(dataval, mdio_addr);
 817                mdio_delay();
 818                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 819                mdio_delay();
 820        }
 821        /* Clear out extra bits. */
 822        for (i = 2; i > 0; i--) {
 823                iowrite8(MDIO_EnbIn, mdio_addr);
 824                mdio_delay();
 825                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 826                mdio_delay();
 827        }
 828}
 829
 830static int mdio_wait_link(struct net_device *dev, int wait)
 831{
 832        int bmsr;
 833        int phy_id;
 834        struct netdev_private *np;
 835
 836        np = netdev_priv(dev);
 837        phy_id = np->phys[0];
 838
 839        do {
 840                bmsr = mdio_read(dev, phy_id, MII_BMSR);
 841                if (bmsr & 0x0004)
 842                        return 0;
 843                mdelay(1);
 844        } while (--wait > 0);
 845        return -1;
 846}
 847
 848static int netdev_open(struct net_device *dev)
 849{
 850        struct netdev_private *np = netdev_priv(dev);
 851        void __iomem *ioaddr = np->base;
 852        const int irq = np->pci_dev->irq;
 853        unsigned long flags;
 854        int i;
 855
 856        sundance_reset(dev, 0x00ff << 16);
 857
 858        i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 859        if (i)
 860                return i;
 861
 862        if (netif_msg_ifup(np))
 863                printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 864
 865        init_ring(dev);
 866
 867        iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 868        /* The Tx list pointer is written as packets are queued. */
 869
 870        /* Initialize other registers. */
 871        __set_mac_addr(dev);
 872#if IS_ENABLED(CONFIG_VLAN_8021Q)
 873        iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 874#else
 875        iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 876#endif
 877        if (dev->mtu > 2047)
 878                iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 879
 880        /* Configure the PCI bus bursts and FIFO thresholds. */
 881
 882        if (dev->if_port == 0)
 883                dev->if_port = np->default_port;
 884
 885        spin_lock_init(&np->mcastlock);
 886
 887        set_rx_mode(dev);
 888        iowrite16(0, ioaddr + IntrEnable);
 889        iowrite16(0, ioaddr + DownCounter);
 890        /* Set the chip to poll every N*320nsec. */
 891        iowrite8(100, ioaddr + RxDMAPollPeriod);
 892        iowrite8(127, ioaddr + TxDMAPollPeriod);
 893        /* Fix DFE-580TX packet drop issue */
 894        if (np->pci_dev->revision >= 0x14)
 895                iowrite8(0x01, ioaddr + DebugCtrl1);
 896        netif_start_queue(dev);
 897
 898        spin_lock_irqsave(&np->lock, flags);
 899        reset_tx(dev);
 900        spin_unlock_irqrestore(&np->lock, flags);
 901
 902        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 903
 904        /* Disable Wol */
 905        iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 906        np->wol_enabled = 0;
 907
 908        if (netif_msg_ifup(np))
 909                printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 910                           "MAC Control %x, %4.4x %4.4x.\n",
 911                           dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 912                           ioread32(ioaddr + MACCtrl0),
 913                           ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 914
 915        /* Set the timer to check for link beat. */
 916        timer_setup(&np->timer, netdev_timer, 0);
 917        np->timer.expires = jiffies + 3*HZ;
 918        add_timer(&np->timer);
 919
 920        /* Enable interrupts by setting the interrupt mask. */
 921        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 922
 923        return 0;
 924}
 925
 926static void check_duplex(struct net_device *dev)
 927{
 928        struct netdev_private *np = netdev_priv(dev);
 929        void __iomem *ioaddr = np->base;
 930        int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 931        int negotiated = mii_lpa & np->mii_if.advertising;
 932        int duplex;
 933
 934        /* Force media */
 935        if (!np->an_enable || mii_lpa == 0xffff) {
 936                if (np->mii_if.full_duplex)
 937                        iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 938                                ioaddr + MACCtrl0);
 939                return;
 940        }
 941
 942        /* Autonegotiation */
 943        duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 944        if (np->mii_if.full_duplex != duplex) {
 945                np->mii_if.full_duplex = duplex;
 946                if (netif_msg_link(np))
 947                        printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 948                                   "negotiated capability %4.4x.\n", dev->name,
 949                                   duplex ? "full" : "half", np->phys[0], negotiated);
 950                iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 951        }
 952}
 953
 954static void netdev_timer(struct timer_list *t)
 955{
 956        struct netdev_private *np = from_timer(np, t, timer);
 957        struct net_device *dev = np->mii_if.dev;
 958        void __iomem *ioaddr = np->base;
 959        int next_tick = 10*HZ;
 960
 961        if (netif_msg_timer(np)) {
 962                printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 963                           "Tx %x Rx %x.\n",
 964                           dev->name, ioread16(ioaddr + IntrEnable),
 965                           ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 966        }
 967        check_duplex(dev);
 968        np->timer.expires = jiffies + next_tick;
 969        add_timer(&np->timer);
 970}
 971
 972static void tx_timeout(struct net_device *dev)
 973{
 974        struct netdev_private *np = netdev_priv(dev);
 975        void __iomem *ioaddr = np->base;
 976        unsigned long flag;
 977
 978        netif_stop_queue(dev);
 979        tasklet_disable(&np->tx_tasklet);
 980        iowrite16(0, ioaddr + IntrEnable);
 981        printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 982                   "TxFrameId %2.2x,"
 983                   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 984                   ioread8(ioaddr + TxFrameId));
 985
 986        {
 987                int i;
 988                for (i=0; i<TX_RING_SIZE; i++) {
 989                        printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 990                                (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 991                                le32_to_cpu(np->tx_ring[i].next_desc),
 992                                le32_to_cpu(np->tx_ring[i].status),
 993                                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 994                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
 995                                le32_to_cpu(np->tx_ring[i].frag[0].length));
 996                }
 997                printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 998                        ioread32(np->base + TxListPtr),
 999                        netif_queue_stopped(dev));
1000                printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1001                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
1002                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1003                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1004                printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1005        }
1006        spin_lock_irqsave(&np->lock, flag);
1007
1008        /* Stop and restart the chip's Tx processes . */
1009        reset_tx(dev);
1010        spin_unlock_irqrestore(&np->lock, flag);
1011
1012        dev->if_port = 0;
1013
1014        netif_trans_update(dev); /* prevent tx timeout */
1015        dev->stats.tx_errors++;
1016        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1017                netif_wake_queue(dev);
1018        }
1019        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1020        tasklet_enable(&np->tx_tasklet);
1021}
1022
1023
1024/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1025static void init_ring(struct net_device *dev)
1026{
1027        struct netdev_private *np = netdev_priv(dev);
1028        int i;
1029
1030        np->cur_rx = np->cur_tx = 0;
1031        np->dirty_rx = np->dirty_tx = 0;
1032        np->cur_task = 0;
1033
1034        np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1035
1036        /* Initialize all Rx descriptors. */
1037        for (i = 0; i < RX_RING_SIZE; i++) {
1038                np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1039                        ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1040                np->rx_ring[i].status = 0;
1041                np->rx_ring[i].frag[0].length = 0;
1042                np->rx_skbuff[i] = NULL;
1043        }
1044
1045        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1046        for (i = 0; i < RX_RING_SIZE; i++) {
1047                struct sk_buff *skb =
1048                        netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1049                np->rx_skbuff[i] = skb;
1050                if (skb == NULL)
1051                        break;
1052                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
1053                np->rx_ring[i].frag[0].addr = cpu_to_le32(
1054                        dma_map_single(&np->pci_dev->dev, skb->data,
1055                                np->rx_buf_sz, DMA_FROM_DEVICE));
1056                if (dma_mapping_error(&np->pci_dev->dev,
1057                                        np->rx_ring[i].frag[0].addr)) {
1058                        dev_kfree_skb(skb);
1059                        np->rx_skbuff[i] = NULL;
1060                        break;
1061                }
1062                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1063        }
1064        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1065
1066        for (i = 0; i < TX_RING_SIZE; i++) {
1067                np->tx_skbuff[i] = NULL;
1068                np->tx_ring[i].status = 0;
1069        }
1070}
1071
1072static void tx_poll (unsigned long data)
1073{
1074        struct net_device *dev = (struct net_device *)data;
1075        struct netdev_private *np = netdev_priv(dev);
1076        unsigned head = np->cur_task % TX_RING_SIZE;
1077        struct netdev_desc *txdesc =
1078                &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1079
1080        /* Chain the next pointer */
1081        for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1082                int entry = np->cur_task % TX_RING_SIZE;
1083                txdesc = &np->tx_ring[entry];
1084                if (np->last_tx) {
1085                        np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1086                                entry*sizeof(struct netdev_desc));
1087                }
1088                np->last_tx = txdesc;
1089        }
1090        /* Indicate the latest descriptor of tx ring */
1091        txdesc->status |= cpu_to_le32(DescIntrOnTx);
1092
1093        if (ioread32 (np->base + TxListPtr) == 0)
1094                iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1095                        np->base + TxListPtr);
1096}
1097
1098static netdev_tx_t
1099start_tx (struct sk_buff *skb, struct net_device *dev)
1100{
1101        struct netdev_private *np = netdev_priv(dev);
1102        struct netdev_desc *txdesc;
1103        unsigned entry;
1104
1105        /* Calculate the next Tx descriptor entry. */
1106        entry = np->cur_tx % TX_RING_SIZE;
1107        np->tx_skbuff[entry] = skb;
1108        txdesc = &np->tx_ring[entry];
1109
1110        txdesc->next_desc = 0;
1111        txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1112        txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1113                                skb->data, skb->len, DMA_TO_DEVICE));
1114        if (dma_mapping_error(&np->pci_dev->dev,
1115                                txdesc->frag[0].addr))
1116                        goto drop_frame;
1117        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1118
1119        /* Increment cur_tx before tasklet_schedule() */
1120        np->cur_tx++;
1121        mb();
1122        /* Schedule a tx_poll() task */
1123        tasklet_schedule(&np->tx_tasklet);
1124
1125        /* On some architectures: explicitly flush cache lines here. */
1126        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1127            !netif_queue_stopped(dev)) {
1128                /* do nothing */
1129        } else {
1130                netif_stop_queue (dev);
1131        }
1132        if (netif_msg_tx_queued(np)) {
1133                printk (KERN_DEBUG
1134                        "%s: Transmit frame #%d queued in slot %d.\n",
1135                        dev->name, np->cur_tx, entry);
1136        }
1137        return NETDEV_TX_OK;
1138
1139drop_frame:
1140        dev_kfree_skb_any(skb);
1141        np->tx_skbuff[entry] = NULL;
1142        dev->stats.tx_dropped++;
1143        return NETDEV_TX_OK;
1144}
1145
1146/* Reset hardware tx and free all of tx buffers */
1147static int
1148reset_tx (struct net_device *dev)
1149{
1150        struct netdev_private *np = netdev_priv(dev);
1151        void __iomem *ioaddr = np->base;
1152        struct sk_buff *skb;
1153        int i;
1154
1155        /* Reset tx logic, TxListPtr will be cleaned */
1156        iowrite16 (TxDisable, ioaddr + MACCtrl1);
1157        sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1158
1159        /* free all tx skbuff */
1160        for (i = 0; i < TX_RING_SIZE; i++) {
1161                np->tx_ring[i].next_desc = 0;
1162
1163                skb = np->tx_skbuff[i];
1164                if (skb) {
1165                        dma_unmap_single(&np->pci_dev->dev,
1166                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
1167                                skb->len, DMA_TO_DEVICE);
1168                        dev_kfree_skb_any(skb);
1169                        np->tx_skbuff[i] = NULL;
1170                        dev->stats.tx_dropped++;
1171                }
1172        }
1173        np->cur_tx = np->dirty_tx = 0;
1174        np->cur_task = 0;
1175
1176        np->last_tx = NULL;
1177        iowrite8(127, ioaddr + TxDMAPollPeriod);
1178
1179        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1180        return 0;
1181}
1182
1183/* The interrupt handler cleans up after the Tx thread,
1184   and schedule a Rx thread work */
1185static irqreturn_t intr_handler(int irq, void *dev_instance)
1186{
1187        struct net_device *dev = (struct net_device *)dev_instance;
1188        struct netdev_private *np = netdev_priv(dev);
1189        void __iomem *ioaddr = np->base;
1190        int hw_frame_id;
1191        int tx_cnt;
1192        int tx_status;
1193        int handled = 0;
1194        int i;
1195
1196        do {
1197                int intr_status = ioread16(ioaddr + IntrStatus);
1198                iowrite16(intr_status, ioaddr + IntrStatus);
1199
1200                if (netif_msg_intr(np))
1201                        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1202                                   dev->name, intr_status);
1203
1204                if (!(intr_status & DEFAULT_INTR))
1205                        break;
1206
1207                handled = 1;
1208
1209                if (intr_status & (IntrRxDMADone)) {
1210                        iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1211                                        ioaddr + IntrEnable);
1212                        if (np->budget < 0)
1213                                np->budget = RX_BUDGET;
1214                        tasklet_schedule(&np->rx_tasklet);
1215                }
1216                if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1217                        tx_status = ioread16 (ioaddr + TxStatus);
1218                        for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1219                                if (netif_msg_tx_done(np))
1220                                        printk
1221                                            ("%s: Transmit status is %2.2x.\n",
1222                                        dev->name, tx_status);
1223                                if (tx_status & 0x1e) {
1224                                        if (netif_msg_tx_err(np))
1225                                                printk("%s: Transmit error status %4.4x.\n",
1226                                                           dev->name, tx_status);
1227                                        dev->stats.tx_errors++;
1228                                        if (tx_status & 0x10)
1229                                                dev->stats.tx_fifo_errors++;
1230                                        if (tx_status & 0x08)
1231                                                dev->stats.collisions++;
1232                                        if (tx_status & 0x04)
1233                                                dev->stats.tx_fifo_errors++;
1234                                        if (tx_status & 0x02)
1235                                                dev->stats.tx_window_errors++;
1236
1237                                        /*
1238                                        ** This reset has been verified on
1239                                        ** DFE-580TX boards ! phdm@macqel.be.
1240                                        */
1241                                        if (tx_status & 0x10) { /* TxUnderrun */
1242                                                /* Restart Tx FIFO and transmitter */
1243                                                sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1244                                                /* No need to reset the Tx pointer here */
1245                                        }
1246                                        /* Restart the Tx. Need to make sure tx enabled */
1247                                        i = 10;
1248                                        do {
1249                                                iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1250                                                if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1251                                                        break;
1252                                                mdelay(1);
1253                                        } while (--i);
1254                                }
1255                                /* Yup, this is a documentation bug.  It cost me *hours*. */
1256                                iowrite16 (0, ioaddr + TxStatus);
1257                                if (tx_cnt < 0) {
1258                                        iowrite32(5000, ioaddr + DownCounter);
1259                                        break;
1260                                }
1261                                tx_status = ioread16 (ioaddr + TxStatus);
1262                        }
1263                        hw_frame_id = (tx_status >> 8) & 0xff;
1264                } else  {
1265                        hw_frame_id = ioread8(ioaddr + TxFrameId);
1266                }
1267
1268                if (np->pci_dev->revision >= 0x14) {
1269                        spin_lock(&np->lock);
1270                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1271                                int entry = np->dirty_tx % TX_RING_SIZE;
1272                                struct sk_buff *skb;
1273                                int sw_frame_id;
1274                                sw_frame_id = (le32_to_cpu(
1275                                        np->tx_ring[entry].status) >> 2) & 0xff;
1276                                if (sw_frame_id == hw_frame_id &&
1277                                        !(le32_to_cpu(np->tx_ring[entry].status)
1278                                        & 0x00010000))
1279                                                break;
1280                                if (sw_frame_id == (hw_frame_id + 1) %
1281                                        TX_RING_SIZE)
1282                                                break;
1283                                skb = np->tx_skbuff[entry];
1284                                /* Free the original skb. */
1285                                dma_unmap_single(&np->pci_dev->dev,
1286                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1287                                        skb->len, DMA_TO_DEVICE);
1288                                dev_consume_skb_irq(np->tx_skbuff[entry]);
1289                                np->tx_skbuff[entry] = NULL;
1290                                np->tx_ring[entry].frag[0].addr = 0;
1291                                np->tx_ring[entry].frag[0].length = 0;
1292                        }
1293                        spin_unlock(&np->lock);
1294                } else {
1295                        spin_lock(&np->lock);
1296                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1297                                int entry = np->dirty_tx % TX_RING_SIZE;
1298                                struct sk_buff *skb;
1299                                if (!(le32_to_cpu(np->tx_ring[entry].status)
1300                                                        & 0x00010000))
1301                                        break;
1302                                skb = np->tx_skbuff[entry];
1303                                /* Free the original skb. */
1304                                dma_unmap_single(&np->pci_dev->dev,
1305                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1306                                        skb->len, DMA_TO_DEVICE);
1307                                dev_consume_skb_irq(np->tx_skbuff[entry]);
1308                                np->tx_skbuff[entry] = NULL;
1309                                np->tx_ring[entry].frag[0].addr = 0;
1310                                np->tx_ring[entry].frag[0].length = 0;
1311                        }
1312                        spin_unlock(&np->lock);
1313                }
1314
1315                if (netif_queue_stopped(dev) &&
1316                        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1317                        /* The ring is no longer full, clear busy flag. */
1318                        netif_wake_queue (dev);
1319                }
1320                /* Abnormal error summary/uncommon events handlers. */
1321                if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1322                        netdev_error(dev, intr_status);
1323        } while (0);
1324        if (netif_msg_intr(np))
1325                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1326                           dev->name, ioread16(ioaddr + IntrStatus));
1327        return IRQ_RETVAL(handled);
1328}
1329
1330static void rx_poll(unsigned long data)
1331{
1332        struct net_device *dev = (struct net_device *)data;
1333        struct netdev_private *np = netdev_priv(dev);
1334        int entry = np->cur_rx % RX_RING_SIZE;
1335        int boguscnt = np->budget;
1336        void __iomem *ioaddr = np->base;
1337        int received = 0;
1338
1339        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1340        while (1) {
1341                struct netdev_desc *desc = &(np->rx_ring[entry]);
1342                u32 frame_status = le32_to_cpu(desc->status);
1343                int pkt_len;
1344
1345                if (--boguscnt < 0) {
1346                        goto not_done;
1347                }
1348                if (!(frame_status & DescOwn))
1349                        break;
1350                pkt_len = frame_status & 0x1fff;        /* Chip omits the CRC. */
1351                if (netif_msg_rx_status(np))
1352                        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1353                                   frame_status);
1354                if (frame_status & 0x001f4000) {
1355                        /* There was a error. */
1356                        if (netif_msg_rx_err(np))
1357                                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1358                                           frame_status);
1359                        dev->stats.rx_errors++;
1360                        if (frame_status & 0x00100000)
1361                                dev->stats.rx_length_errors++;
1362                        if (frame_status & 0x00010000)
1363                                dev->stats.rx_fifo_errors++;
1364                        if (frame_status & 0x00060000)
1365                                dev->stats.rx_frame_errors++;
1366                        if (frame_status & 0x00080000)
1367                                dev->stats.rx_crc_errors++;
1368                        if (frame_status & 0x00100000) {
1369                                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1370                                           " status %8.8x.\n",
1371                                           dev->name, frame_status);
1372                        }
1373                } else {
1374                        struct sk_buff *skb;
1375#ifndef final_version
1376                        if (netif_msg_rx_status(np))
1377                                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1378                                           ", bogus_cnt %d.\n",
1379                                           pkt_len, boguscnt);
1380#endif
1381                        /* Check if the packet is long enough to accept without copying
1382                           to a minimally-sized skbuff. */
1383                        if (pkt_len < rx_copybreak &&
1384                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1385                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
1386                                dma_sync_single_for_cpu(&np->pci_dev->dev,
1387                                                le32_to_cpu(desc->frag[0].addr),
1388                                                np->rx_buf_sz, DMA_FROM_DEVICE);
1389                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1390                                dma_sync_single_for_device(&np->pci_dev->dev,
1391                                                le32_to_cpu(desc->frag[0].addr),
1392                                                np->rx_buf_sz, DMA_FROM_DEVICE);
1393                                skb_put(skb, pkt_len);
1394                        } else {
1395                                dma_unmap_single(&np->pci_dev->dev,
1396                                        le32_to_cpu(desc->frag[0].addr),
1397                                        np->rx_buf_sz, DMA_FROM_DEVICE);
1398                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
1399                                np->rx_skbuff[entry] = NULL;
1400                        }
1401                        skb->protocol = eth_type_trans(skb, dev);
1402                        /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1403                        netif_rx(skb);
1404                }
1405                entry = (entry + 1) % RX_RING_SIZE;
1406                received++;
1407        }
1408        np->cur_rx = entry;
1409        refill_rx (dev);
1410        np->budget -= received;
1411        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1412        return;
1413
1414not_done:
1415        np->cur_rx = entry;
1416        refill_rx (dev);
1417        if (!received)
1418                received = 1;
1419        np->budget -= received;
1420        if (np->budget <= 0)
1421                np->budget = RX_BUDGET;
1422        tasklet_schedule(&np->rx_tasklet);
1423}
1424
1425static void refill_rx (struct net_device *dev)
1426{
1427        struct netdev_private *np = netdev_priv(dev);
1428        int entry;
1429        int cnt = 0;
1430
1431        /* Refill the Rx ring buffers. */
1432        for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1433                np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1434                struct sk_buff *skb;
1435                entry = np->dirty_rx % RX_RING_SIZE;
1436                if (np->rx_skbuff[entry] == NULL) {
1437                        skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1438                        np->rx_skbuff[entry] = skb;
1439                        if (skb == NULL)
1440                                break;          /* Better luck next round. */
1441                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
1442                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1443                                dma_map_single(&np->pci_dev->dev, skb->data,
1444                                        np->rx_buf_sz, DMA_FROM_DEVICE));
1445                        if (dma_mapping_error(&np->pci_dev->dev,
1446                                    np->rx_ring[entry].frag[0].addr)) {
1447                            dev_kfree_skb_irq(skb);
1448                            np->rx_skbuff[entry] = NULL;
1449                            break;
1450                        }
1451                }
1452                /* Perhaps we need not reset this field. */
1453                np->rx_ring[entry].frag[0].length =
1454                        cpu_to_le32(np->rx_buf_sz | LastFrag);
1455                np->rx_ring[entry].status = 0;
1456                cnt++;
1457        }
1458}
1459static void netdev_error(struct net_device *dev, int intr_status)
1460{
1461        struct netdev_private *np = netdev_priv(dev);
1462        void __iomem *ioaddr = np->base;
1463        u16 mii_ctl, mii_advertise, mii_lpa;
1464        int speed;
1465
1466        if (intr_status & LinkChange) {
1467                if (mdio_wait_link(dev, 10) == 0) {
1468                        printk(KERN_INFO "%s: Link up\n", dev->name);
1469                        if (np->an_enable) {
1470                                mii_advertise = mdio_read(dev, np->phys[0],
1471                                                           MII_ADVERTISE);
1472                                mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1473                                mii_advertise &= mii_lpa;
1474                                printk(KERN_INFO "%s: Link changed: ",
1475                                        dev->name);
1476                                if (mii_advertise & ADVERTISE_100FULL) {
1477                                        np->speed = 100;
1478                                        printk("100Mbps, full duplex\n");
1479                                } else if (mii_advertise & ADVERTISE_100HALF) {
1480                                        np->speed = 100;
1481                                        printk("100Mbps, half duplex\n");
1482                                } else if (mii_advertise & ADVERTISE_10FULL) {
1483                                        np->speed = 10;
1484                                        printk("10Mbps, full duplex\n");
1485                                } else if (mii_advertise & ADVERTISE_10HALF) {
1486                                        np->speed = 10;
1487                                        printk("10Mbps, half duplex\n");
1488                                } else
1489                                        printk("\n");
1490
1491                        } else {
1492                                mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1493                                speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1494                                np->speed = speed;
1495                                printk(KERN_INFO "%s: Link changed: %dMbps ,",
1496                                        dev->name, speed);
1497                                printk("%s duplex.\n",
1498                                        (mii_ctl & BMCR_FULLDPLX) ?
1499                                                "full" : "half");
1500                        }
1501                        check_duplex(dev);
1502                        if (np->flowctrl && np->mii_if.full_duplex) {
1503                                iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1504                                        ioaddr + MulticastFilter1+2);
1505                                iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1506                                        ioaddr + MACCtrl0);
1507                        }
1508                        netif_carrier_on(dev);
1509                } else {
1510                        printk(KERN_INFO "%s: Link down\n", dev->name);
1511                        netif_carrier_off(dev);
1512                }
1513        }
1514        if (intr_status & StatsMax) {
1515                get_stats(dev);
1516        }
1517        if (intr_status & IntrPCIErr) {
1518                printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1519                           dev->name, intr_status);
1520                /* We must do a global reset of DMA to continue. */
1521        }
1522}
1523
1524static struct net_device_stats *get_stats(struct net_device *dev)
1525{
1526        struct netdev_private *np = netdev_priv(dev);
1527        void __iomem *ioaddr = np->base;
1528        unsigned long flags;
1529        u8 late_coll, single_coll, mult_coll;
1530
1531        spin_lock_irqsave(&np->statlock, flags);
1532        /* The chip only need report frame silently dropped. */
1533        dev->stats.rx_missed_errors     += ioread8(ioaddr + RxMissed);
1534        dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1535        dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1536        dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1537
1538        mult_coll = ioread8(ioaddr + StatsMultiColl);
1539        np->xstats.tx_multiple_collisions += mult_coll;
1540        single_coll = ioread8(ioaddr + StatsOneColl);
1541        np->xstats.tx_single_collisions += single_coll;
1542        late_coll = ioread8(ioaddr + StatsLateColl);
1543        np->xstats.tx_late_collisions += late_coll;
1544        dev->stats.collisions += mult_coll
1545                + single_coll
1546                + late_coll;
1547
1548        np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1549        np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1550        np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1551        np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1552        np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1553        np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1554        np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1555
1556        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1557        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1558        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1559        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1560
1561        spin_unlock_irqrestore(&np->statlock, flags);
1562
1563        return &dev->stats;
1564}
1565
1566static void set_rx_mode(struct net_device *dev)
1567{
1568        struct netdev_private *np = netdev_priv(dev);
1569        void __iomem *ioaddr = np->base;
1570        u16 mc_filter[4];                       /* Multicast hash filter */
1571        u32 rx_mode;
1572        int i;
1573
1574        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1575                memset(mc_filter, 0xff, sizeof(mc_filter));
1576                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1577        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1578                   (dev->flags & IFF_ALLMULTI)) {
1579                /* Too many to match, or accept all multicasts. */
1580                memset(mc_filter, 0xff, sizeof(mc_filter));
1581                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1582        } else if (!netdev_mc_empty(dev)) {
1583                struct netdev_hw_addr *ha;
1584                int bit;
1585                int index;
1586                int crc;
1587                memset (mc_filter, 0, sizeof (mc_filter));
1588                netdev_for_each_mc_addr(ha, dev) {
1589                        crc = ether_crc_le(ETH_ALEN, ha->addr);
1590                        for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1591                                if (crc & 0x80000000) index |= 1 << bit;
1592                        mc_filter[index/16] |= (1 << (index % 16));
1593                }
1594                rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1595        } else {
1596                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1597                return;
1598        }
1599        if (np->mii_if.full_duplex && np->flowctrl)
1600                mc_filter[3] |= 0x0200;
1601
1602        for (i = 0; i < 4; i++)
1603                iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1604        iowrite8(rx_mode, ioaddr + RxMode);
1605}
1606
1607static int __set_mac_addr(struct net_device *dev)
1608{
1609        struct netdev_private *np = netdev_priv(dev);
1610        u16 addr16;
1611
1612        addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1613        iowrite16(addr16, np->base + StationAddr);
1614        addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1615        iowrite16(addr16, np->base + StationAddr+2);
1616        addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1617        iowrite16(addr16, np->base + StationAddr+4);
1618        return 0;
1619}
1620
1621/* Invoked with rtnl_lock held */
1622static int sundance_set_mac_addr(struct net_device *dev, void *data)
1623{
1624        const struct sockaddr *addr = data;
1625
1626        if (!is_valid_ether_addr(addr->sa_data))
1627                return -EADDRNOTAVAIL;
1628        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1629        __set_mac_addr(dev);
1630
1631        return 0;
1632}
1633
1634static const struct {
1635        const char name[ETH_GSTRING_LEN];
1636} sundance_stats[] = {
1637        { "tx_multiple_collisions" },
1638        { "tx_single_collisions" },
1639        { "tx_late_collisions" },
1640        { "tx_deferred" },
1641        { "tx_deferred_excessive" },
1642        { "tx_aborted" },
1643        { "tx_bcasts" },
1644        { "rx_bcasts" },
1645        { "tx_mcasts" },
1646        { "rx_mcasts" },
1647};
1648
1649static int check_if_running(struct net_device *dev)
1650{
1651        if (!netif_running(dev))
1652                return -EINVAL;
1653        return 0;
1654}
1655
1656static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1657{
1658        struct netdev_private *np = netdev_priv(dev);
1659        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1660        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1661        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1662}
1663
1664static int get_link_ksettings(struct net_device *dev,
1665                              struct ethtool_link_ksettings *cmd)
1666{
1667        struct netdev_private *np = netdev_priv(dev);
1668        spin_lock_irq(&np->lock);
1669        mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1670        spin_unlock_irq(&np->lock);
1671        return 0;
1672}
1673
1674static int set_link_ksettings(struct net_device *dev,
1675                              const struct ethtool_link_ksettings *cmd)
1676{
1677        struct netdev_private *np = netdev_priv(dev);
1678        int res;
1679        spin_lock_irq(&np->lock);
1680        res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1681        spin_unlock_irq(&np->lock);
1682        return res;
1683}
1684
1685static int nway_reset(struct net_device *dev)
1686{
1687        struct netdev_private *np = netdev_priv(dev);
1688        return mii_nway_restart(&np->mii_if);
1689}
1690
1691static u32 get_link(struct net_device *dev)
1692{
1693        struct netdev_private *np = netdev_priv(dev);
1694        return mii_link_ok(&np->mii_if);
1695}
1696
1697static u32 get_msglevel(struct net_device *dev)
1698{
1699        struct netdev_private *np = netdev_priv(dev);
1700        return np->msg_enable;
1701}
1702
1703static void set_msglevel(struct net_device *dev, u32 val)
1704{
1705        struct netdev_private *np = netdev_priv(dev);
1706        np->msg_enable = val;
1707}
1708
1709static void get_strings(struct net_device *dev, u32 stringset,
1710                u8 *data)
1711{
1712        if (stringset == ETH_SS_STATS)
1713                memcpy(data, sundance_stats, sizeof(sundance_stats));
1714}
1715
1716static int get_sset_count(struct net_device *dev, int sset)
1717{
1718        switch (sset) {
1719        case ETH_SS_STATS:
1720                return ARRAY_SIZE(sundance_stats);
1721        default:
1722                return -EOPNOTSUPP;
1723        }
1724}
1725
1726static void get_ethtool_stats(struct net_device *dev,
1727                struct ethtool_stats *stats, u64 *data)
1728{
1729        struct netdev_private *np = netdev_priv(dev);
1730        int i = 0;
1731
1732        get_stats(dev);
1733        data[i++] = np->xstats.tx_multiple_collisions;
1734        data[i++] = np->xstats.tx_single_collisions;
1735        data[i++] = np->xstats.tx_late_collisions;
1736        data[i++] = np->xstats.tx_deferred;
1737        data[i++] = np->xstats.tx_deferred_excessive;
1738        data[i++] = np->xstats.tx_aborted;
1739        data[i++] = np->xstats.tx_bcasts;
1740        data[i++] = np->xstats.rx_bcasts;
1741        data[i++] = np->xstats.tx_mcasts;
1742        data[i++] = np->xstats.rx_mcasts;
1743}
1744
1745#ifdef CONFIG_PM
1746
1747static void sundance_get_wol(struct net_device *dev,
1748                struct ethtool_wolinfo *wol)
1749{
1750        struct netdev_private *np = netdev_priv(dev);
1751        void __iomem *ioaddr = np->base;
1752        u8 wol_bits;
1753
1754        wol->wolopts = 0;
1755
1756        wol->supported = (WAKE_PHY | WAKE_MAGIC);
1757        if (!np->wol_enabled)
1758                return;
1759
1760        wol_bits = ioread8(ioaddr + WakeEvent);
1761        if (wol_bits & MagicPktEnable)
1762                wol->wolopts |= WAKE_MAGIC;
1763        if (wol_bits & LinkEventEnable)
1764                wol->wolopts |= WAKE_PHY;
1765}
1766
1767static int sundance_set_wol(struct net_device *dev,
1768        struct ethtool_wolinfo *wol)
1769{
1770        struct netdev_private *np = netdev_priv(dev);
1771        void __iomem *ioaddr = np->base;
1772        u8 wol_bits;
1773
1774        if (!device_can_wakeup(&np->pci_dev->dev))
1775                return -EOPNOTSUPP;
1776
1777        np->wol_enabled = !!(wol->wolopts);
1778        wol_bits = ioread8(ioaddr + WakeEvent);
1779        wol_bits &= ~(WakePktEnable | MagicPktEnable |
1780                        LinkEventEnable | WolEnable);
1781
1782        if (np->wol_enabled) {
1783                if (wol->wolopts & WAKE_MAGIC)
1784                        wol_bits |= (MagicPktEnable | WolEnable);
1785                if (wol->wolopts & WAKE_PHY)
1786                        wol_bits |= (LinkEventEnable | WolEnable);
1787        }
1788        iowrite8(wol_bits, ioaddr + WakeEvent);
1789
1790        device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1791
1792        return 0;
1793}
1794#else
1795#define sundance_get_wol NULL
1796#define sundance_set_wol NULL
1797#endif /* CONFIG_PM */
1798
1799static const struct ethtool_ops ethtool_ops = {
1800        .begin = check_if_running,
1801        .get_drvinfo = get_drvinfo,
1802        .nway_reset = nway_reset,
1803        .get_link = get_link,
1804        .get_wol = sundance_get_wol,
1805        .set_wol = sundance_set_wol,
1806        .get_msglevel = get_msglevel,
1807        .set_msglevel = set_msglevel,
1808        .get_strings = get_strings,
1809        .get_sset_count = get_sset_count,
1810        .get_ethtool_stats = get_ethtool_stats,
1811        .get_link_ksettings = get_link_ksettings,
1812        .set_link_ksettings = set_link_ksettings,
1813};
1814
1815static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1816{
1817        struct netdev_private *np = netdev_priv(dev);
1818        int rc;
1819
1820        if (!netif_running(dev))
1821                return -EINVAL;
1822
1823        spin_lock_irq(&np->lock);
1824        rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1825        spin_unlock_irq(&np->lock);
1826
1827        return rc;
1828}
1829
1830static int netdev_close(struct net_device *dev)
1831{
1832        struct netdev_private *np = netdev_priv(dev);
1833        void __iomem *ioaddr = np->base;
1834        struct sk_buff *skb;
1835        int i;
1836
1837        /* Wait and kill tasklet */
1838        tasklet_kill(&np->rx_tasklet);
1839        tasklet_kill(&np->tx_tasklet);
1840        np->cur_tx = 0;
1841        np->dirty_tx = 0;
1842        np->cur_task = 0;
1843        np->last_tx = NULL;
1844
1845        netif_stop_queue(dev);
1846
1847        if (netif_msg_ifdown(np)) {
1848                printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1849                           "Rx %4.4x Int %2.2x.\n",
1850                           dev->name, ioread8(ioaddr + TxStatus),
1851                           ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1852                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1853                           dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1854        }
1855
1856        /* Disable interrupts by clearing the interrupt mask. */
1857        iowrite16(0x0000, ioaddr + IntrEnable);
1858
1859        /* Disable Rx and Tx DMA for safely release resource */
1860        iowrite32(0x500, ioaddr + DMACtrl);
1861
1862        /* Stop the chip's Tx and Rx processes. */
1863        iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1864
1865        for (i = 2000; i > 0; i--) {
1866                if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1867                        break;
1868                mdelay(1);
1869        }
1870
1871        iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1872                        ioaddr + ASIC_HI_WORD(ASICCtrl));
1873
1874        for (i = 2000; i > 0; i--) {
1875                if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1876                        break;
1877                mdelay(1);
1878        }
1879
1880#ifdef __i386__
1881        if (netif_msg_hw(np)) {
1882                printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1883                           (int)(np->tx_ring_dma));
1884                for (i = 0; i < TX_RING_SIZE; i++)
1885                        printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1886                                   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1887                                   np->tx_ring[i].frag[0].length);
1888                printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1889                           (int)(np->rx_ring_dma));
1890                for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1891                        printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1892                                   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1893                                   np->rx_ring[i].frag[0].length);
1894                }
1895        }
1896#endif /* __i386__ debugging only */
1897
1898        free_irq(np->pci_dev->irq, dev);
1899
1900        del_timer_sync(&np->timer);
1901
1902        /* Free all the skbuffs in the Rx queue. */
1903        for (i = 0; i < RX_RING_SIZE; i++) {
1904                np->rx_ring[i].status = 0;
1905                skb = np->rx_skbuff[i];
1906                if (skb) {
1907                        dma_unmap_single(&np->pci_dev->dev,
1908                                le32_to_cpu(np->rx_ring[i].frag[0].addr),
1909                                np->rx_buf_sz, DMA_FROM_DEVICE);
1910                        dev_kfree_skb(skb);
1911                        np->rx_skbuff[i] = NULL;
1912                }
1913                np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1914        }
1915        for (i = 0; i < TX_RING_SIZE; i++) {
1916                np->tx_ring[i].next_desc = 0;
1917                skb = np->tx_skbuff[i];
1918                if (skb) {
1919                        dma_unmap_single(&np->pci_dev->dev,
1920                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
1921                                skb->len, DMA_TO_DEVICE);
1922                        dev_kfree_skb(skb);
1923                        np->tx_skbuff[i] = NULL;
1924                }
1925        }
1926
1927        return 0;
1928}
1929
1930static void sundance_remove1(struct pci_dev *pdev)
1931{
1932        struct net_device *dev = pci_get_drvdata(pdev);
1933
1934        if (dev) {
1935            struct netdev_private *np = netdev_priv(dev);
1936            unregister_netdev(dev);
1937            dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1938                    np->rx_ring, np->rx_ring_dma);
1939            dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1940                    np->tx_ring, np->tx_ring_dma);
1941            pci_iounmap(pdev, np->base);
1942            pci_release_regions(pdev);
1943            free_netdev(dev);
1944        }
1945}
1946
1947#ifdef CONFIG_PM
1948
1949static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1950{
1951        struct net_device *dev = pci_get_drvdata(pci_dev);
1952        struct netdev_private *np = netdev_priv(dev);
1953        void __iomem *ioaddr = np->base;
1954
1955        if (!netif_running(dev))
1956                return 0;
1957
1958        netdev_close(dev);
1959        netif_device_detach(dev);
1960
1961        pci_save_state(pci_dev);
1962        if (np->wol_enabled) {
1963                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1964                iowrite16(RxEnable, ioaddr + MACCtrl1);
1965        }
1966        pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1967                        np->wol_enabled);
1968        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1969
1970        return 0;
1971}
1972
1973static int sundance_resume(struct pci_dev *pci_dev)
1974{
1975        struct net_device *dev = pci_get_drvdata(pci_dev);
1976        int err = 0;
1977
1978        if (!netif_running(dev))
1979                return 0;
1980
1981        pci_set_power_state(pci_dev, PCI_D0);
1982        pci_restore_state(pci_dev);
1983        pci_enable_wake(pci_dev, PCI_D0, 0);
1984
1985        err = netdev_open(dev);
1986        if (err) {
1987                printk(KERN_ERR "%s: Can't resume interface!\n",
1988                                dev->name);
1989                goto out;
1990        }
1991
1992        netif_device_attach(dev);
1993
1994out:
1995        return err;
1996}
1997
1998#endif /* CONFIG_PM */
1999
2000static struct pci_driver sundance_driver = {
2001        .name           = DRV_NAME,
2002        .id_table       = sundance_pci_tbl,
2003        .probe          = sundance_probe1,
2004        .remove         = sundance_remove1,
2005#ifdef CONFIG_PM
2006        .suspend        = sundance_suspend,
2007        .resume         = sundance_resume,
2008#endif /* CONFIG_PM */
2009};
2010
2011static int __init sundance_init(void)
2012{
2013/* when a module, this is printed whether or not devices are found in probe */
2014#ifdef MODULE
2015        printk(version);
2016#endif
2017        return pci_register_driver(&sundance_driver);
2018}
2019
2020static void __exit sundance_exit(void)
2021{
2022        pci_unregister_driver(&sundance_driver);
2023}
2024
2025module_init(sundance_init);
2026module_exit(sundance_exit);
2027
2028
2029