linux/drivers/net/ethernet/dec/tulip/winbond-840.c
/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
        Written 1998-2001 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/drivers.html

        Do not remove the copyright information.
        Do not change the version information unless an improvement has been made.
        Merely removing my name, as Compex has done in the past, does not count
        as an improvement.

        Changelog:
        * ported to 2.4
                ???
        * spin lock update, memory barriers, new style dma mappings
                limit each tx buffer to < 1024 bytes
                remove DescIntr from Rx descriptors (that's a Tx flag)
                remove next pointer from Tx descriptors
                synchronize tx_q_bytes
                software reset in tx_timeout
                        Copyright (C) 2000 Manfred Spraul
        * further cleanups
                power management.
                support for big endian descriptors
                        Copyright (C) 2001 Manfred Spraul
        * ethtool support (jgarzik)
        * Replace some MII-related magic numbers with constants (jgarzik)

        TODO:
        * enable pci_power_off
        * Wake-On-LAN
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "winbond-840"

/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_QUEUE_LEN    10              /* Limit ring entries actually used.  */
#define TX_QUEUE_LEN_RESTART    5
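
/*
 * Because the ring sizes are powers of two, taking a ring index modulo the
 * size costs only a bit mask.  A minimal sketch (assuming TX_RING_SIZE == 16
 * for illustration; the real constant comes in via tulip.h):
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *	entry = np->cur_tx & (TX_RING_SIZE - 1);   equivalent form the
 *						   compiler emits
 *
 * cur_tx and dirty_tx are free-running unsigned counters, so
 * "cur_tx - dirty_tx" is the queue depth even after they wrap.
 */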

#define TX_BUFLIMIT     (1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
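
/*
 * The headroom arithmetic: with a 2048 byte FIFO,
 * TX_BUG_FIFO_LIMIT = 2048 - 1514 - 16 = 518, so the queue is stopped once
 * more than 518 bytes are outstanding, which guarantees room for one more
 * maximum-size (1514 byte) frame plus a little slack.
 */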


/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ                       /* tulip.h also defines this */
#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/
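
/*
 * In ring mode both buffer pointers of a descriptor carry packet data and
 * the chip steps to the next descriptor in memory instead of following
 * buffer2 as a chain pointer.  A sketch of how a frame larger than one
 * buffer is split across the two buffers (see start_tx() for the real code):
 *
 *	desc->buffer1 = dma_addr;                    first TX_BUFLIMIT bytes
 *	desc->buffer2 = dma_addr + TX_BUFLIMIT;      the remainder
 *	desc->length  = DescWholePkt
 *	              | ((len - TX_BUFLIMIT) << 11)  buffer2 size, bits 21:11
 *	              | TX_BUFLIMIT;                 buffer1 size, bits 10:0
 */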



/*
  PCI probe table.
*/
enum chip_capability_flags {
        CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
        { 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
        { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
        netdev_res_size         = 128,  /* size of PCI BAR resource */
};

struct pci_id_info {
        const char *name;
        int drv_flags;          /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
        {                               /* Sometimes a Level-One switch card. */
          "Winbond W89c840",    CanHaveMII | HasBrokenTx | FDXOnNoMII},
        { "Winbond W89c840",    CanHaveMII | HasBrokenTx},
        { "Compex RL100-ATX",   CanHaveMII | HasBrokenTx},
        { }     /* terminate list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  See CONFIG_TULIP_MMIO in .config.
*/

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
        PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
        RxRingPtr=0x0C, TxRingPtr=0x10,
        IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
        RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
        CurRxDescAddr=0x30, CurRxBufAddr=0x34,                  /* Debug use */
        MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
        CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
        AcceptErr=0x80,
        RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
        RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
        MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
        MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
        s32 status;
        s32 length;
        u32 buffer1;
        u32 buffer2;
};

struct w840_tx_desc {
        s32 status;
        s32 length;
        u32 buffer1, buffer2;
};

#define MII_CNT         1 /* winbond only supports one MII */
struct netdev_private {
        struct w840_rx_desc *rx_ring;
        dma_addr_t      rx_addr[RX_RING_SIZE];
        struct w840_tx_desc *tx_ring;
        dma_addr_t      tx_addr[TX_RING_SIZE];
        dma_addr_t ring_dma_addr;
        /* The addresses of receive-in-place skbuffs. */
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        /* The saved address of a sent-in-place packet/buffer, for later free(). */
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        struct net_device_stats stats;
        struct timer_list timer;        /* Media monitoring timer. */
        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        int chip_id, drv_flags;
        struct pci_dev *pci_dev;
        int csr6;
        struct w840_rx_desc *rx_head_desc;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
        unsigned int cur_tx, dirty_tx;
        unsigned int tx_q_bytes;
        unsigned int tx_full;                           /* The Tx queue is full. */
        /* MII transceiver section. */
        int mii_cnt;                                            /* MII device addresses. */
        unsigned char phys[MII_CNT];            /* MII device addresses, but only the first is used */
        u32 mii;
        struct mii_if_info mii_if;
        void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int find_cnt;
        int chip_idx = ent->driver_data;
        int irq;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        void __iomem *ioaddr;

        i = pci_enable_device(pdev);
        if (i) return i;

        pci_set_master(pdev);

        irq = pdev->irq;

        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                pr_warn("Device %s disabled due to DMA limitations\n",
                        pci_name(pdev));
                return -EIO;
        }
        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;

        ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
        if (!ioaddr)
                goto err_out_free_res;

        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

        /* Reset the chip to erase previous misconfiguration.
           No hold time required! */
        iowrite32(0x00000001, ioaddr + PCIBusCfg);

        np = netdev_priv(dev);
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
        spin_lock_init(&np->lock);
        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->base_addr = ioaddr;

        pci_set_drvdata(pdev, dev);

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->mii_if.full_duplex = 1;
                if (option & 15)
                        dev_info(&dev->dev,
                                 "ignoring user supplied media type %d\n",
                                 option & 15);
        }
        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;

        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        i = register_netdev(dev);
        if (i)
                goto err_out_cleardev;

        dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
                 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

        if (np->drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
                                                mdio_read(dev, phy, MII_PHYSID2);
                                dev_info(&dev->dev,
                                         "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
                                         np->mii, phy, mii_status,
                                         np->mii_if.advertising);
                        }
                }
                np->mii_cnt = phy_idx;
                np->mii_if.phy_id = np->phys[0];
                if (phy_idx == 0) {
                        dev_warn(&dev->dev,
                                 "MII PHY not found -- this device may not operate correctly\n");
                }
        }

        find_cnt++;
        return 0;

err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)   ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
        EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
        EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
        EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
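
/*
 * A sketch of the 93c46 command framing: reading word 7 sends the start
 * bit, the read opcode (10) and a 6-bit address, i.e.
 *
 *	int read_cmd = 7 | EE_ReadCmd;	binary 110000111:
 *					start=1, op=10, addr=000111
 *
 * eeprom_read() below clocks bits 10..0 of this word out MSB first; the
 * two leading zero bits are ignored by the part until the start bit
 * arrives.
 */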

static int eeprom_read(void __iomem *addr, int location)
{
        int i;
        int retval = 0;
        void __iomem *ee_addr = addr + EECtrl;
        int read_cmd = location | EE_ReadCmd;
        iowrite32(EE_ChipSelect, ee_addr);

        /* Shift the read command bits out. */
        for (i = 10; i >= 0; i--) {
                short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
                iowrite32(dataval, ee_addr);
                eeprom_delay(ee_addr);
                iowrite32(dataval | EE_ShiftClk, ee_addr);
                eeprom_delay(ee_addr);
        }
        iowrite32(EE_ChipSelect, ee_addr);
        eeprom_delay(ee_addr);

        for (i = 16; i > 0; i--) {
                iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
                eeprom_delay(ee_addr);
                retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
                iowrite32(EE_ChipSelect, ee_addr);
                eeprom_delay(ee_addr);
        }

        /* Terminate the EEPROM access. */
        iowrite32(0, ee_addr);
        return retval;
}
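
/*
 * Usage: the station address occupies the first three 16-bit EEPROM words,
 * so the probe code above assembles the MAC address with three reads:
 *
 *	for (i = 0; i < 3; i++)
 *		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
 */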

/*  MII transceiver control section.
        Read and write the MII registers using software-generated serial
        MDIO protocol.  See the MII specifications or DP83840A data sheet
        for details.

        The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
        met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set for older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
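
/*
 * For reference, the 32-bit MDIO write frame as assembled in mdio_write()
 * below:
 *
 *	mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
 *
 * bits 31:30 = 01 (start), 29:28 = 01 (write opcode), 27:23 = PHY address,
 * 22:18 = register, 17:16 = 10 (turnaround), 15:0 = data.
 */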

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
        int bits = 32;

        /* Establish sync by sending at least 32 logic ones. */
        while (--bits >= 0) {
                iowrite32(MDIO_WRITE1, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base_addr + MIICtrl;
        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
        int i, retval = 0;

        if (mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the read command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite32(dataval, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        /* Read the two transition, 16 data, and wire-idle bits. */
        for (i = 20; i > 0; i--) {
                iowrite32(MDIO_EnbIn, mdio_addr);
                mdio_delay(mdio_addr);
                retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
                iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base_addr + MIICtrl;
        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
        int i;

        if (location == 4  &&  phy_id == np->phys[0])
                np->mii_if.advertising = value;

        if (mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the command bits out. */
        for (i = 31; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite32(dataval, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
                iowrite32(MDIO_EnbIn, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
}


static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        const int irq = np->pci_dev->irq;
        int i;

        iowrite32(0x00000001, ioaddr + PCIBusCfg);              /* Reset */

        netif_device_detach(dev);
        i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                goto out_err;

        if (debug > 1)
                netdev_dbg(dev, "%s() irq %d\n", __func__, irq);

        i = alloc_ringdesc(dev);
        if (i)
                goto out_err;

        spin_lock_irq(&np->lock);
        netif_device_attach(dev);
        init_registers(dev);
        spin_unlock_irq(&np->lock);

        netif_start_queue(dev);
        if (debug > 2)
                netdev_dbg(dev, "Done %s()\n", __func__);

        /* Set the timer to check for link beat. */
        timer_setup(&np->timer, netdev_timer, 0);
        np->timer.expires = jiffies + 1*HZ;
        add_timer(&np->timer);
        return 0;
out_err:
        netif_device_attach(dev);
        return i;
}

#define MII_DAVICOM_DM9101      0x0181b800

static int update_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int duplex, fasteth, result, mii_reg;

        /* BMSR */
        mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

        if (mii_reg == 0xffff)
                return np->csr6;
        /* reread: the link status bit is sticky */
        mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
        if (!(mii_reg & 0x4)) {
                if (netif_carrier_ok(dev)) {
                        if (debug)
                                dev_info(&dev->dev,
                                         "MII #%d reports no link. Disabling watchdog\n",
                                         np->phys[0]);
                        netif_carrier_off(dev);
                }
                return np->csr6;
        }
        if (!netif_carrier_ok(dev)) {
                if (debug)
                        dev_info(&dev->dev,
                                 "MII #%d link is back. Enabling watchdog\n",
                                 np->phys[0]);
                netif_carrier_on(dev);
        }

        if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
                /* If the link partner doesn't support autonegotiation
                 * the MII detects its abilities with "parallel detection".
                 * Some MIIs update the LPA register to the result of the
                 * parallel detection, some don't.
                 * The Davicom PHY [at least 0181b800] doesn't.
                 * Instead bits 9 and 13 of the BMCR are updated to the
                 * result of the negotiation.
                 */
                mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
                duplex = mii_reg & BMCR_FULLDPLX;
                fasteth = mii_reg & BMCR_SPEED100;
        } else {
                int negotiated;
                mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
                negotiated = mii_reg & np->mii_if.advertising;

                duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
                fasteth = negotiated & 0x380;
        }
        duplex |= np->mii_if.force_media;
        /* remove fastether and fullduplex */
        result = np->csr6 & ~0x20000200;
        if (duplex)
                result |= 0x200;
        if (fasteth)
                result |= 0x20000000;
        if (result != np->csr6 && debug)
                dev_info(&dev->dev,
                         "Setting %dMBit-%s-duplex based on MII#%d\n",
                         fasteth ? 100 : 10, duplex ? "full" : "half",
                         np->phys[0]);
        return result;
}
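
/*
 * A worked example of the autonegotiation path above, assuming typical
 * register values: if we advertise 10/100 half and full duplex
 * (advertising = 0x01E1) and the link partner's LPA reads 0x45E1, then
 * negotiated = 0x01E1.  LPA_100FULL (0x0100) is set, so duplex and
 * fasteth are both nonzero and update_link() returns csr6 with
 * 0x20000000 (100 MBit) and 0x200 (full duplex) set.
 */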

#define RXTX_TIMEOUT    2000
static inline void update_csr6(struct net_device *dev, int new)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int limit = RXTX_TIMEOUT;

        if (!netif_device_present(dev))
                new = 0;
        if (new==np->csr6)
                return;
        /* stop both Tx and Rx processes */
        iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
        /* wait until they have really stopped */
        for (;;) {
                int csr5 = ioread32(ioaddr + IntrStatus);
                int t;

                t = (csr5 >> 17) & 0x07;
                if (t==0||t==1) {
                        /* rx stopped */
                        t = (csr5 >> 20) & 0x07;
                        if (t==0||t==1)
                                break;
                }

                limit--;
                if(!limit) {
                        dev_info(&dev->dev,
                                 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
                        break;
                }
                udelay(1);
        }
        np->csr6 = new;
        /* and restart them with the new configuration */
        iowrite32(np->csr6, ioaddr + NetworkConfig);
        if (new & 0x200)
                np->mii_if.full_duplex = 1;
}

static void netdev_timer(struct timer_list *t)
{
        struct netdev_private *np = from_timer(np, t, timer);
        struct net_device *dev = pci_get_drvdata(np->pci_dev);
        void __iomem *ioaddr = np->base_addr;

        if (debug > 2)
                netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
                           ioread32(ioaddr + IntrStatus),
                           ioread32(ioaddr + NetworkConfig));
        spin_lock_irq(&np->lock);
        update_csr6(dev, update_link(dev));
        spin_unlock_irq(&np->lock);
        np->timer.expires = jiffies + 10*HZ;
        add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->rx_head_desc = &np->rx_ring[0];
        np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].length = np->rx_buf_sz;
                np->rx_ring[i].status = 0;
                np->rx_skbuff[i] = NULL;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[i-1].length |= DescEndRing;

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
                                                np->rx_buf_sz,
                                                DMA_FROM_DEVICE);

                np->rx_ring[i].buffer1 = np->rx_addr[i];
                np->rx_ring[i].status = DescOwned;
        }

        np->cur_rx = 0;
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Initialize the Tx descriptors */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = 0;
        }
        np->tx_full = 0;
        np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

        iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
        iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
                np->base_addr + TxRingPtr);

}

static void free_rxtx_rings(struct netdev_private* np)
{
        int i;
        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].status = 0;
                if (np->rx_skbuff[i]) {
                        dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
                                         np->rx_skbuff[i]->len,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                }
                np->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (np->tx_skbuff[i]) {
                        dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
                                         np->tx_skbuff[i]->len, DMA_TO_DEVICE);
                        dev_kfree_skb(np->tx_skbuff[i]);
                }
                np->tx_skbuff[i] = NULL;
        }
}

static void init_registers(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int i;

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

        /* Initialize other registers. */
#ifdef __BIG_ENDIAN
        i = (1<<20);    /* Big-endian descriptors */
#else
        i = 0;
#endif
        i |= (0x04<<2);         /* skip length 4 u32 */
        i |= 0x02;              /* give Rx priority */

        /* Configure the PCI bus bursts and FIFO thresholds.
           486: Set 8 longword cache alignment, 8 longword burst.
           586: Set 16 longword cache alignment, no burst limit.
           Cache alignment bits 15:14        Burst length 13:8
                0000    <not allowed>           0000 align to cache     0800 8 longwords
                4000    8  longwords            0100 1 longword         1000 16 longwords
                8000    16 longwords            0200 2 longwords        2000 32 longwords
                C000    32  longwords           0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
        /* When not a module we can work around broken '486 PCI boards. */
        if (boot_cpu_data.x86 <= 4) {
                i |= 0x4800;
                dev_info(&dev->dev,
                         "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
        } else {
                i |= 0xE000;
        }
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
        i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
        i |= 0x4800;
#else
        dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
        i |= 0x4800;
#endif
        iowrite32(i, ioaddr + PCIBusCfg);

        np->csr6 = 0;
        /* 128 byte Tx threshold;
                Transmit on; Receive on; */
        update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

        /* Clear and Enable interrupts by setting the interrupt mask. */
        iowrite32(0x1A0F5, ioaddr + IntrStatus);
        iowrite32(0x1A0F5, ioaddr + IntrEnable);

        iowrite32(0, ioaddr + RxStartDemand);
}
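
/*
 * The csr6 value programmed above is composed of three parts; for example,
 * on a 100 MBit full-duplex link with no special rx mode:
 *
 *	0x00022002		Tx on (0x2000), Rx on (0x0002), and
 *				Tx threshold field (bits 20:14) = 8 (0x20000)
 *	| update_link(dev)	adds 0x20000000 | 0x200
 *	| __set_rx_mode(dev)	adds RxAcceptBroadcast | AcceptMulticast
 *				| AcceptMyPhys
 *
 * Assuming the threshold field counts 16-byte units, 8 matches the
 * "128 byte Tx threshold" comment; netdev_error() doubles this field on
 * each Tx FIFO underflow, up to 127 (store a full packet before
 * transmitting).
 */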

static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        const int irq = np->pci_dev->irq;

        dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
                 ioread32(ioaddr + IntrStatus));

        {
                int i;
                printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
                printk(KERN_CONT "\n");
                printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_CONT " %08x", np->tx_ring[i].status);
                printk(KERN_CONT "\n");
        }
        printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
               np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
        printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

        disable_irq(irq);
        spin_lock_irq(&np->lock);
        /*
         * Under high load dirty_tx and the internal tx descriptor pointer
         * come out of sync, thus perform a software reset and reinitialize
         * everything.
         */

        iowrite32(1, np->base_addr+PCIBusCfg);
        udelay(1);

        free_rxtx_rings(np);
        init_rxtx_rings(dev);
        init_registers(dev);
        spin_unlock_irq(&np->lock);
        enable_irq(irq);

        netif_wake_queue(dev);
        netif_trans_update(dev); /* prevent tx timeout */
        np->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
                                         sizeof(struct w840_rx_desc) * RX_RING_SIZE +
                                         sizeof(struct w840_tx_desc) * TX_RING_SIZE,
                                         &np->ring_dma_addr, GFP_KERNEL);
        if(!np->rx_ring)
                return -ENOMEM;
        init_rxtx_rings(dev);
        return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
        dma_free_coherent(&np->pci_dev->dev,
                          sizeof(struct w840_rx_desc) * RX_RING_SIZE +
                          sizeof(struct w840_tx_desc) * TX_RING_SIZE,
                          np->rx_ring, np->ring_dma_addr);

}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned entry;

        /* Caution: the write order is important here, set the field
           with the "ownership" bits last. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;

        np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
                                            skb->len, DMA_TO_DEVICE);
        np->tx_skbuff[entry] = skb;

        np->tx_ring[entry].buffer1 = np->tx_addr[entry];
        if (skb->len < TX_BUFLIMIT) {
                np->tx_ring[entry].length = DescWholePkt | skb->len;
        } else {
                int len = skb->len - TX_BUFLIMIT;

                np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
                np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
        }
        if(entry == TX_RING_SIZE-1)
                np->tx_ring[entry].length |= DescEndRing;

        /* Now acquire the irq spinlock.
         * The difficult race is the ordering between
         * increasing np->cur_tx and setting DescOwned:
         * - if np->cur_tx is increased first the interrupt
         *   handler could consider the packet as transmitted
         *   since DescOwned is cleared.
         * - If DescOwned is set first the NIC could report the
         *   packet as sent, but the interrupt handler would ignore it
         *   since the np->cur_tx was not yet increased.
         */
        spin_lock_irq(&np->lock);
        np->cur_tx++;

        wmb(); /* flush length, buffer1, buffer2 */
        np->tx_ring[entry].status = DescOwned;
        wmb(); /* flush status and kick the hardware */
        iowrite32(0, np->base_addr + TxStartDemand);
        np->tx_q_bytes += skb->len;
        /* Work around horrible bug in the chip by marking the queue as full
           when we do not have FIFO room for a maximum sized packet. */
        if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
                ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
                netif_stop_queue(dev);
                wmb();
                np->tx_full = 1;
        }
        spin_unlock_irq(&np->lock);

        if (debug > 4) {
                netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
                           np->cur_tx, entry);
        }
        return NETDEV_TX_OK;
}
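
/*
 * For a maximum-size 1514 byte frame the two-buffer split above works
 * out to:
 *
 *	buffer1: TX_BUFLIMIT = 896 bytes at tx_addr
 *	buffer2: 1514 - 896  = 618 bytes at tx_addr + 896
 *	length : DescWholePkt | (618 << 11) | 896
 *
 * i.e. the second buffer length lives in bits 21:11 of the length word
 * and the first in bits 10:0.
 */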

static void netdev_tx_done(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                int tx_status = np->tx_ring[entry].status;

                if (tx_status < 0)
                        break;
                if (tx_status & 0x8000) {       /* There was an error, log it. */
#ifndef final_version
                        if (debug > 1)
                                netdev_dbg(dev, "Transmit error, Tx status %08x\n",
                                           tx_status);
#endif
                        np->stats.tx_errors++;
                        if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
                        if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
                        if (tx_status & 0x0200) np->stats.tx_window_errors++;
                        if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
                        if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
                                np->stats.tx_heartbeat_errors++;
                } else {
#ifndef final_version
                        if (debug > 3)
                                netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
                                           entry, tx_status);
#endif
                        np->stats.tx_bytes += np->tx_skbuff[entry]->len;
                        np->stats.collisions += (tx_status >> 3) & 15;
                        np->stats.tx_packets++;
                }
                /* Free the original skb. */
                dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
                                 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
                np->tx_q_bytes -= np->tx_skbuff[entry]->len;
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
        }
        if (np->tx_full &&
                np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
                np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
                /* The ring is no longer full, clear tbusy. */
                np->tx_full = 0;
                wmb();
                netif_wake_queue(dev);
        }
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int work_limit = max_interrupt_work;
        int handled = 0;

        if (!netif_device_present(dev))
                return IRQ_NONE;
        do {
                u32 intr_status = ioread32(ioaddr + IntrStatus);

                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

                if (debug > 4)
                        netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

                if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
                        break;

                handled = 1;

                if (intr_status & (RxIntr | RxNoBuf))
                        netdev_rx(dev);
                if (intr_status & RxNoBuf)
                        iowrite32(0, ioaddr + RxStartDemand);

                if (intr_status & (TxNoBuf | TxIntr) &&
                        np->cur_tx != np->dirty_tx) {
                        spin_lock(&np->lock);
                        netdev_tx_done(dev);
                        spin_unlock(&np->lock);
                }

                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
                                                   TimerInt | TxDied))
                        netdev_error(dev, intr_status);

                if (--work_limit < 0) {
                        dev_warn(&dev->dev,
                                 "Too much work at interrupt, status=0x%04x\n",
                                 intr_status);
                        /* Set the timer to re-enable the other interrupts after
                           10*82usec ticks. */
                        spin_lock(&np->lock);
                        if (netif_device_present(dev)) {
                                iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
                                iowrite32(10, ioaddr + GPTimer);
                        }
                        spin_unlock(&np->lock);
                        break;
                }
        } while (1);

        if (debug > 3)
                netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
                           ioread32(ioaddr + IntrStatus));
        return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

        if (debug > 4) {
                netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
                           entry, np->rx_ring[entry].status);
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (--work_limit >= 0) {
                struct w840_rx_desc *desc = np->rx_head_desc;
                s32 status = desc->status;

                if (debug > 4)
                        netdev_dbg(dev, "  netdev_rx() status was %08x\n",
                                   status);
                if (status < 0)
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        dev_warn(&dev->dev,
                                                 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
                                                 np->cur_rx, status);
                                        np->stats.rx_length_errors++;
                                }
                        } else if (status & 0x8000) {
                                /* There was a fatal error. */
                                if (debug > 2)
                                        netdev_dbg(dev, "Receive error, Rx status %08x\n",
                                                   status);
                                np->stats.rx_errors++; /* end of a packet.*/
                                if (status & 0x0890) np->stats.rx_length_errors++;
                                if (status & 0x004C) np->stats.rx_frame_errors++;
                                if (status & 0x0002) np->stats.rx_crc_errors++;
                        }
                } else {
                        struct sk_buff *skb;
                        /* Omit the four octet CRC from the length. */
                        int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
                        if (debug > 4)
                                netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
                                           pkt_len, status);
#endif
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                dma_sync_single_for_cpu(&np->pci_dev->dev,
                                                        np->rx_addr[entry],
                                                        np->rx_skbuff[entry]->len,
                                                        DMA_FROM_DEVICE);
                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
                                skb_put(skb, pkt_len);
                                dma_sync_single_for_device(&np->pci_dev->dev,
                                                           np->rx_addr[entry],
                                                           np->rx_skbuff[entry]->len,
                                                           DMA_FROM_DEVICE);
                        } else {
                                dma_unmap_single(&np->pci_dev->dev,
                                                 np->rx_addr[entry],
                                                 np->rx_skbuff[entry]->len,
                                                 DMA_FROM_DEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
#ifndef final_version                           /* Remove after testing. */
                        /* You will want this info for the initial debug. */
                        if (debug > 5)
                                netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
                                           &skb->data[0], &skb->data[6],
                                           skb->data[12], skb->data[13],
                                           &skb->data[14]);
#endif
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        np->stats.rx_packets++;
                        np->stats.rx_bytes += pkt_len;
                }
                entry = (++np->cur_rx) % RX_RING_SIZE;
                np->rx_head_desc = &np->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
                struct sk_buff *skb;
                entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;                  /* Better luck next round. */
                        np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
                                                            skb->data,
                                                            np->rx_buf_sz,
                                                            DMA_FROM_DEVICE);
                        np->rx_ring[entry].buffer1 = np->rx_addr[entry];
                }
                wmb();
                np->rx_ring[entry].status = DescOwned;
        }

        return 0;
}
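
/*
 * A sketch of the rx_copybreak decision in the loop above, ignoring
 * allocation-failure handling:
 *
 *	if (pkt_len < rx_copybreak)
 *		copy the frame into a fresh pkt_len + 2 skb (reserving
 *		2 bytes to align the IP header) and hand the original,
 *		still-mapped receive buffer straight back to the chip;
 *	else
 *		unmap the buffer and pass the original skb up, forcing
 *		the refill loop to allocate a replacement.
 *
 * With the default rx_copybreak of 0 every frame takes the second path.
 */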
1274
1275static void netdev_error(struct net_device *dev, int intr_status)
1276{
1277        struct netdev_private *np = netdev_priv(dev);
1278        void __iomem *ioaddr = np->base_addr;
1279
1280        if (debug > 2)
1281                netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1282        if (intr_status == 0xffffffff)
1283                return;
1284        spin_lock(&np->lock);
1285        if (intr_status & TxFIFOUnderflow) {
1286                int new;
1287                /* Bump up the Tx threshold */
1288#if 0
1289                /* This causes lots of dropped packets,
1290                 * and under high load even tx_timeouts
1291                 */
1292                new = np->csr6 + 0x4000;
1293#else
1294                new = (np->csr6 >> 14)&0x7f;
1295                if (new < 64)
1296                        new *= 2;
1297                 else
1298                        new = 127; /* load full packet before starting */
1299                new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1300#endif
1301                netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1302                update_csr6(dev, new);
1303        }
1304        if (intr_status & RxDied) {             /* Missed a Rx frame. */
1305                np->stats.rx_errors++;
1306        }
1307        if (intr_status & TimerInt) {
1308                /* Re-enable other interrupts. */
1309                if (netif_device_present(dev))
1310                        iowrite32(0x1A0F5, ioaddr + IntrEnable);
1311        }
1312        np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1313        iowrite32(0, ioaddr + RxStartDemand);
1314        spin_unlock(&np->lock);
1315}
1316
1317static struct net_device_stats *get_stats(struct net_device *dev)
1318{
1319        struct netdev_private *np = netdev_priv(dev);
1320        void __iomem *ioaddr = np->base_addr;
1321
1322        /* The chip only need report frame silently dropped. */
1323        spin_lock_irq(&np->lock);
1324        if (netif_running(dev) && netif_device_present(dev))
1325                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1326        spin_unlock_irq(&np->lock);
1327
1328        return &np->stats;
1329}
1330
1331
1332static u32 __set_rx_mode(struct net_device *dev)
1333{
1334        struct netdev_private *np = netdev_priv(dev);
1335        void __iomem *ioaddr = np->base_addr;
1336        u32 mc_filter[2];                       /* Multicast hash filter */
1337        u32 rx_mode;
1338
1339        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1340                memset(mc_filter, 0xff, sizeof(mc_filter));
1341                rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1342                        | AcceptMyPhys;
1343        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1344                   (dev->flags & IFF_ALLMULTI)) {
1345                /* Too many to match, or accept all multicasts. */
1346                memset(mc_filter, 0xff, sizeof(mc_filter));
1347                rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1348        } else {
1349                struct netdev_hw_addr *ha;
1350
1351                memset(mc_filter, 0, sizeof(mc_filter));
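                    /* The '840 hashes multicasts into a 64-entry table:
                     * the top six bits of the Ethernet CRC (XORed with
                     * 0x3F, presumably to match the chip's bit ordering)
                     * select one bit across the two 32-bit filter
                     * registers written out below.
                     */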
1352                netdev_for_each_mc_addr(ha, dev) {
1353                        int filbit;
1354
1355                        filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1356                        filbit &= 0x3f;
1357                        mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1358                }
1359                rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1360        }
1361        iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1362        iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1363        return rx_mode;
1364}
1365
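    /* Recompute the hash filter, then fold the returned Rx mode bits
     * (csr6 bits 3..7, hence the ~0x00F8 mask) into the running
     * configuration under the lock.
     */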
1366static void set_rx_mode(struct net_device *dev)
1367{
1368        struct netdev_private *np = netdev_priv(dev);
1369        u32 rx_mode = __set_rx_mode(dev);
1370        spin_lock_irq(&np->lock);
1371        update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1372        spin_unlock_irq(&np->lock);
1373}
1374
1375static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1376{
1377        struct netdev_private *np = netdev_priv(dev);
1378
1379        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1380        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1381}
1382
1383static int netdev_get_link_ksettings(struct net_device *dev,
1384                                     struct ethtool_link_ksettings *cmd)
1385{
1386        struct netdev_private *np = netdev_priv(dev);
1387
1388        spin_lock_irq(&np->lock);
1389        mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1390        spin_unlock_irq(&np->lock);
1391
1392        return 0;
1393}
1394
1395static int netdev_set_link_ksettings(struct net_device *dev,
1396                                     const struct ethtool_link_ksettings *cmd)
1397{
1398        struct netdev_private *np = netdev_priv(dev);
1399        int rc;
1400
1401        spin_lock_irq(&np->lock);
1402        rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1403        spin_unlock_irq(&np->lock);
1404
1405        return rc;
1406}
1407
1408static int netdev_nway_reset(struct net_device *dev)
1409{
1410        struct netdev_private *np = netdev_priv(dev);
1411        return mii_nway_restart(&np->mii_if);
1412}
1413
1414static u32 netdev_get_link(struct net_device *dev)
1415{
1416        struct netdev_private *np = netdev_priv(dev);
1417        return mii_link_ok(&np->mii_if);
1418}
1419
1420static u32 netdev_get_msglevel(struct net_device *dev)
1421{
1422        return debug;
1423}
1424
1425static void netdev_set_msglevel(struct net_device *dev, u32 value)
1426{
1427        debug = value;
1428}
1429
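    /* These hooks service the standard ethtool requests: "ethtool eth0"
     * lands in get_link_ksettings, "ethtool -r eth0" in nway_reset and
     * "ethtool -s eth0 ..." in set_link_ksettings, all handled through
     * the generic MII library via np->mii_if.
     */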
1430static const struct ethtool_ops netdev_ethtool_ops = {
1431        .get_drvinfo            = netdev_get_drvinfo,
1432        .nway_reset             = netdev_nway_reset,
1433        .get_link               = netdev_get_link,
1434        .get_msglevel           = netdev_get_msglevel,
1435        .set_msglevel           = netdev_set_msglevel,
1436        .get_link_ksettings     = netdev_get_link_ksettings,
1437        .set_link_ksettings     = netdev_set_link_ksettings,
1438};
1439
1440static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1441{
1442        struct mii_ioctl_data *data = if_mii(rq);
1443        struct netdev_private *np = netdev_priv(dev);
1444
1445        switch (cmd) {
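            /* SIOCGMIIPHY deliberately falls through: after reporting the
             * address of the PHY in use, the request is completed as an
             * ordinary MII register read on that PHY.
             */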
1446        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
1447                data->phy_id = np->phys[0] & 0x1f;
1448                fallthrough;
1449
1450        case SIOCGMIIREG:               /* Read MII PHY register. */
1451                spin_lock_irq(&np->lock);
1452                data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1453                spin_unlock_irq(&np->lock);
1454                return 0;
1455
1456        case SIOCSMIIREG:               /* Write MII PHY register. */
1457                spin_lock_irq(&np->lock);
1458                mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1459                spin_unlock_irq(&np->lock);
1460                return 0;
1461        default:
1462                return -EOPNOTSUPP;
1463        }
1464}
1465
1466static int netdev_close(struct net_device *dev)
1467{
1468        struct netdev_private *np = netdev_priv(dev);
1469        void __iomem *ioaddr = np->base_addr;
1470
1471        netif_stop_queue(dev);
1472
1473        if (debug > 1) {
1474                netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1475                           ioread32(ioaddr + IntrStatus),
1476                           ioread32(ioaddr + NetworkConfig));
1477                netdev_dbg(dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1478                           np->cur_tx, np->dirty_tx,
1479                           np->cur_rx, np->dirty_rx);
1480        }
1481
1482        /* Stop the chip's Tx and Rx processes. */
1483        spin_lock_irq(&np->lock);
1484        netif_device_detach(dev);
1485        update_csr6(dev, 0);
1486        iowrite32(0x0000, ioaddr + IntrEnable);
1487        spin_unlock_irq(&np->lock);
1488
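        /* The device was detached above, so a late interrupt finds the
         * hardware already quiesced; once the irq is freed the device
         * can safely be marked present again for a future open.
         */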
1489        free_irq(np->pci_dev->irq, dev);
1490        wmb();
1491        netif_device_attach(dev);
1492
1493        if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1494                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1495
1496#ifdef __i386__
1497        if (debug > 2) {
1498                int i;
1499
1500                printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1501                for (i = 0; i < TX_RING_SIZE; i++)
1502                        printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1503                               i, np->tx_ring[i].length,
1504                               np->tx_ring[i].status, np->tx_ring[i].buffer1);
1505                printk(KERN_DEBUG "  Rx ring at %p:\n", np->rx_ring);
1506                for (i = 0; i < RX_RING_SIZE; i++) {
1507                        printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1508                               i, np->rx_ring[i].length,
1509                               np->rx_ring[i].status, np->rx_ring[i].buffer1);
1510                }
1511        }
1512#endif /* __i386__ debugging only */
1513
1514        del_timer_sync(&np->timer);
1515
1516        free_rxtx_rings(np);
1517        free_ringdesc(np);
1518
1519        return 0;
1520}
1521
1522static void w840_remove1(struct pci_dev *pdev)
1523{
1524        struct net_device *dev = pci_get_drvdata(pdev);
1525
1526        if (dev) {
1527                struct netdev_private *np = netdev_priv(dev);
1528                unregister_netdev(dev);
1529                pci_release_regions(pdev);
1530                pci_iounmap(pdev, np->base_addr);
1531                free_netdev(dev);
1532        }
1533}
1534
1535/*
1536 * suspend/resume synchronization:
1537 * - open, close, do_ioctl:
1538 *      rtnl_lock, and netif_device_detach after the rtnl_unlock.
1539 * - get_stats:
1540 *      spin_lock_irq(np->lock), doesn't touch hw if not present
1541 * - start_xmit:
1542 *      synchronize_irq + netif_tx_disable;
1543 * - tx_timeout:
1544 *      netif_device_detach + netif_tx_disable;
1545 * - set_multicast_list
1546 *      netif_device_detach + netif_tx_disable;
1547 * - interrupt handler
1548 *      doesn't touch hw if not present, synchronize_irq waits for
1549 *      running instances of the interrupt handler.
1550 *
1551 * Disabling hw requires clearing csr6 & IntrEnable.
1552 * update_csr6 and all functions that write IntrEnable check
1553 * netif_device_present before setting any bits.
1554 *
1555 * Detach must occur under spin_lock_irq(); interrupts from a detached
1556 * device would cause an irq storm.
1557 */
1558static int __maybe_unused w840_suspend(struct device *dev_d)
1559{
1560        struct net_device *dev = dev_get_drvdata(dev_d);
1561        struct netdev_private *np = netdev_priv(dev);
1562        void __iomem *ioaddr = np->base_addr;
1563
1564        rtnl_lock();
1565        if (netif_running(dev)) {
1566                del_timer_sync(&np->timer);
1567
1568                spin_lock_irq(&np->lock);
1569                netif_device_detach(dev);
1570                update_csr6(dev, 0);
1571                iowrite32(0, ioaddr + IntrEnable);
1572                spin_unlock_irq(&np->lock);
1573
1574                synchronize_irq(np->pci_dev->irq);
1575                netif_tx_disable(dev);
1576
1577                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1578
1579                /* no more hardware accesses behind this line. */
1580
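                    /* Sanity check: the Tx/Rx engines (csr6) and the
                     * interrupt mask must both be off by now; anything
                     * else means the detach sequence above was incomplete.
                     */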
1581                BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1582
1583                /* pci_power_off(pdev, -1); */
1584
1585                free_rxtx_rings(np);
1586        } else {
1587                netif_device_detach(dev);
1588        }
1589        rtnl_unlock();
1590        return 0;
1591}
1592
1593static int __maybe_unused w840_resume(struct device *dev_d)
1594{
1595        struct net_device *dev = dev_get_drvdata(dev_d);
1596        struct netdev_private *np = netdev_priv(dev);
1597
1598        rtnl_lock();
1599        if (netif_device_present(dev))
1600                goto out; /* device not suspended */
1601        if (netif_running(dev)) {
1602                spin_lock_irq(&np->lock);
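                    /* Writing 1 to PCIBusCfg soft-resets the chip; the
                     * read back posts the write and udelay(1) gives the
                     * reset time to finish before the rings and registers
                     * are rebuilt.
                     */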
1603                iowrite32(1, np->base_addr + PCIBusCfg);
1604                ioread32(np->base_addr + PCIBusCfg);
1605                udelay(1);
1606                netif_device_attach(dev);
1607                init_rxtx_rings(dev);
1608                init_registers(dev);
1609                spin_unlock_irq(&np->lock);
1610
1611                netif_wake_queue(dev);
1612
1613                mod_timer(&np->timer, jiffies + 1*HZ);
1614        } else {
1615                netif_device_attach(dev);
1616        }
1617out:
1618        rtnl_unlock();
1619        return 0;
1620}
1621
1622static SIMPLE_DEV_PM_OPS(w840_pm_ops, w840_suspend, w840_resume);
1623
1624static struct pci_driver w840_driver = {
1625        .name           = DRV_NAME,
1626        .id_table       = w840_pci_tbl,
1627        .probe          = w840_probe1,
1628        .remove         = w840_remove1,
1629        .driver.pm      = &w840_pm_ops,
1630};
1631
1632static int __init w840_init(void)
1633{
1634        return pci_register_driver(&w840_driver);
1635}
1636
1637static void __exit w840_exit(void)
1638{
1639        pci_unregister_driver(&w840_driver);
1640}
1641
1642module_init(w840_init);
1643module_exit(w840_exit);
1644