/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
        Written 1997-2001 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
        It also supports the Symbios Logic version of the same chip core.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/yellowfin.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "yellowfin"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sep 11, 2006"

#define PFX DRV_NAME ": "

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;                       /* Constrained by errata */
static int fifo_cfg = 0x0020;                           /* Bypass external Tx FIFO. */
#elif defined(YF_NEW)                                   /* A future perfect board :->.  */
static int dma_ctrl = 0x00CAC277;                       /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;                         /* Constrained by errata */
static const int fifo_cfg = 0x0020;                             /* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8                             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_SIZE   12              /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE    64
#define STATUS_TOTAL_SIZE       TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE           2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE           RX_RING_SIZE*sizeof(struct yellowfin_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
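
/* Typical module usage (illustrative values only; any subset of the
 * parameters above may be given):
 *
 *      modprobe yellowfin debug=2 rx_copybreak=200 full_duplex=1,1
 *
 * When the driver is built into the kernel, the same parameters can be
 * passed on the kernel command line as yellowfin.<parameter>=<value>. */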

/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
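
The wrap is illustrated by this sketch of what yellowfin_init_ring() below
does for the Rx ring: each descriptor's branch pointer names the next
descriptor, and the modulo makes the final entry point back at the first.

        for (i = 0; i < RX_RING_SIZE; i++)
                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));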

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
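
Condensed, the choice made in yellowfin_rx() below reads:

        if (pkt_len > rx_copybreak) {
                skb = rx_skb;
                skb_put(skb, pkt_len);
        } else {
                skb = dev_alloc_skb(pkt_len + 2);
                skb_reserve(skb, 2);
                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                skb_put(skb, pkt_len);
        }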

IIIc. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is
single-threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it is queuing a Tx
packet.  If the next queue slot is empty, it clears the tbusy flag when
finished; otherwise it sets the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records
stats from the Tx ring.  After reaping the stats, it marks the Tx queue
entry as empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full'
flag is set, it clears both the tx_full and tbusy flags.
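
In code terms, a condensed view of this protocol (the authoritative logic
is in yellowfin_start_xmit() and yellowfin_interrupt() below):

        on the send side, at the end of yellowfin_start_xmit():
                if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                        netif_start_queue(dev);
                else
                        yp->tx_full = 1;

        on the interrupt side, after the Tx stats are reaped:
                if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
                        yp->tx_full = 0;
                        netif_wake_queue(dev);
                }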

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/



enum capability_flags {
        HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
        HasMACAddrBug=32, /* Only on early revs.  */
        DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
        YELLOWFIN_SIZE  = 0x100,
};

struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
        {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
         FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
        {"Symbios SYM53C885", { 0x07011000, 0xffffffff},
          HasMII | DontUseEeprom },
        { }
};

static const struct pci_device_id yellowfin_pci_tbl[] = {
        { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
        TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
        TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
        RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
        RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
        EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
        ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
        Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
        MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
        MII_Status=0xAE,
        RxDepth=0xB8, FlowCtrl=0xBC,
        AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
        EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
        EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
        __le32 dbdma_cmd;
        __le32 addr;
        __le32 branch_addr;
        __le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
        u16 tx_errs;
        u16 tx_cnt;
        u16 paused;
        u16 total_tx_cnt;
#else  /* Little endian chips. */
        u16 tx_cnt;
        u16 tx_errs;
        u16 total_tx_cnt;
        u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.dbdma_cmd */
enum desc_cmd_bits {
        CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
        CMD_NOP=0x60000000, CMD_STOP=0x70000000,
        BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
        BRANCH_IFTRUE=0x040000,
};
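
/* A dbdma_cmd word packs one CMD_* opcode (bits 28-30) together with the
   branch/interrupt/wait control bits and a 16-bit byte count in the low
   half.  For example, yellowfin_init_ring() below builds its Rx commands
   in essence as:

        desc->dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | rx_buf_sz);
*/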

/* Bits in yellowfin_desc.result_status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04, IntrRxPCIErr=0x08,
        IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40, IntrTxPCIErr=0x80,
        IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN      31      /* Required alignment mask */
#define MII_CNT         4
struct yellowfin_private {
        /* Descriptor rings first for alignment.
           Tx requires a second descriptor for status. */
        struct yellowfin_desc *rx_ring;
        struct yellowfin_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        struct tx_status_words *tx_status;
        dma_addr_t tx_status_dma;

        struct timer_list timer;        /* Media selection timer. */
        /* Frequently used and paired value: keep adjacent for cache effect. */
        int chip_id, drv_flags;
        struct pci_dev *pci_dev;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
        struct tx_status_words *tx_tail_desc;
        unsigned int cur_tx, dirty_tx;
        int tx_threshold;
        unsigned int tx_full:1;                         /* The Tx queue is full. */
        unsigned int full_duplex:1;                     /* Full-duplex operation requested. */
        unsigned int duplex_lock:1;
        unsigned int medialock:1;                       /* Do not sense media. */
        unsigned int default_port:4;            /* Last dev->if_port value. */
        /* MII transceiver section. */
        int mii_cnt;                                            /* Number of MII PHYs found. */
        u16 advertising;                                        /* NWay media advertisement */
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used */
        spinlock_t lock;
        void __iomem *base;
};

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
        .ndo_open               = yellowfin_open,
        .ndo_stop               = yellowfin_close,
        .ndo_start_xmit         = yellowfin_start_xmit,
        .ndo_set_multicast_list = set_rx_mode,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = yellowfin_tx_timeout,
};

static int __devinit yellowfin_init_one(struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct yellowfin_private *np;
        int irq;
        int chip_idx = ent->driver_data;
        static int find_cnt;
        void __iomem *ioaddr;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        int drv_flags = pci_id_tbl[chip_idx].drv_flags;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        i = pci_enable_device(pdev);
        if (i) return i;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev) {
                printk (KERN_ERR PFX "cannot allocate ethernet device\n");
                return -ENOMEM;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        np = netdev_priv(dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_free_netdev;

        pci_set_master (pdev);

        ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
        if (!ioaddr)
                goto err_out_free_res;

        irq = pdev->irq;

        if (drv_flags & DontUseEeprom)
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
        else {
                int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
        }

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        dev->base_addr = (unsigned long)ioaddr;
        dev->irq = irq;

        pci_set_drvdata(pdev, dev);
        spin_lock_init(&np->lock);

        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->drv_flags = drv_flags;
        np->base = ioaddr;

        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = (struct yellowfin_desc *)ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct yellowfin_desc *)ring_space;
        np->rx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_rx;
        np->tx_status = (struct tx_status_words *)ring_space;
        np->tx_status_dma = ring_dma;

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->full_duplex = 1;
                np->default_port = option & 15;
                if (np->default_port)
                        np->medialock = 1;
        }
        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
                np->full_duplex = 1;

        if (np->full_duplex)
                np->duplex_lock = 1;

        /* The Yellowfin-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
        dev->watchdog_timeo = TX_TIMEOUT;

        if (mtu)
                dev->mtu = mtu;

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_status;

        printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n",
                   dev->name, pci_id_tbl[chip_idx].name,
                   ioread32(ioaddr + ChipRev), ioaddr,
                   dev->dev_addr, irq);

        if (np->drv_flags & HasMII) {
                int phy, phy_idx = 0;
                for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(ioaddr, phy, 1);
                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->advertising = mdio_read(ioaddr, phy, 4);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "0x%4.4x advertising %4.4x.\n",
                                           dev->name, phy, mii_status, np->advertising);
                        }
                }
                np->mii_cnt = phy_idx;
        }

        find_cnt++;

        return 0;

err_out_unmap_status:
        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
                np->tx_status_dma);
err_out_unmap_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev (dev);
        return -ENODEV;
}

static int __devinit read_eeprom(void __iomem *ioaddr, int location)
{
        int bogus_cnt = 10000;          /* Typical 33 MHz: 1050 ticks */

        iowrite8(location, ioaddr + EEAddr);
        iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
        while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
                ;
        return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(1, ioaddr + MII_Cmd);
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
        return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(value, ioaddr + MII_Wr_Data);

        /* Wait for the command to finish. */
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
}


static int yellowfin_open(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;
        int i, ret;

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
        if (ret)
                return ret;

        if (yellowfin_debug > 1)
                printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
                           dev->name, dev->irq);

        ret = yellowfin_init_ring(dev);
        if (ret) {
                free_irq(dev->irq, dev);
                return ret;
        }

        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

        /* Set up various condition 'select' registers.
           There are no options here. */
        iowrite32(0x00800080, ioaddr + TxIntrSel);      /* Interrupt on Tx abort */
        iowrite32(0x00800080, ioaddr + TxBranchSel);    /* Branch on Tx abort */
        iowrite32(0x00400040, ioaddr + TxWaitSel);      /* Wait on Tx status */
        iowrite32(0x00400040, ioaddr + RxIntrSel);      /* Interrupt on Rx done */
        iowrite32(0x00400040, ioaddr + RxBranchSel);    /* Branch on Rx error */
        iowrite32(0x00400040, ioaddr + RxWaitSel);      /* Wait on Rx done */

        /* Initialize other registers: with so many, this will eventually be
           converted to an offset/value list. */
        iowrite32(dma_ctrl, ioaddr + DMACtrl);
        iowrite16(fifo_cfg, ioaddr + FIFOcfg);
        /* Enable automatic generation of flow control frames, period 0xffff. */
        iowrite32(0x0030FFFF, ioaddr + FlowCtrl);

        yp->tx_threshold = 32;
        iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

        if (dev->if_port == 0)
                dev->if_port = yp->default_port;

        netif_start_queue(dev);

        /* Setting the Rx mode will start the Rx process. */
        if (yp->drv_flags & IsGigabit) {
                /* We are always in full-duplex mode with gigabit! */
                yp->full_duplex = 1;
                iowrite16(0x01CF, ioaddr + Cnfg);
        } else {
                iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
                iowrite16(0x1018, ioaddr + FrameGap1);
                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
        }
        set_rx_mode(dev);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(0x81ff, ioaddr + IntrEnb);                    /* See enum intr_status_bits */
        iowrite16(0x0000, ioaddr + EventStatus);                /* Clear non-interrupting events */
        iowrite32(0x80008000, ioaddr + RxCtrl);         /* Start Rx and Tx channels. */
        iowrite32(0x80008000, ioaddr + TxCtrl);

        if (yellowfin_debug > 2) {
                printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
                           dev->name);
        }

        /* Set the timer to check for link beat. */
        init_timer(&yp->timer);
        yp->timer.expires = jiffies + 3*HZ;
        yp->timer.data = (unsigned long)dev;
        yp->timer.function = &yellowfin_timer;                          /* timer handler */
        add_timer(&yp->timer);

        return 0;
}

static void yellowfin_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;
        int next_tick = 60*HZ;

        if (yellowfin_debug > 3) {
                printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
                           dev->name, ioread16(ioaddr + IntrStatus));
        }

        if (yp->mii_cnt) {
                int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
                int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
                int negotiated = lpa & yp->advertising;
                if (yellowfin_debug > 1)
                        printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
                                   "link partner capability %4.4x.\n",
                                   dev->name, yp->phys[0], bmsr, lpa);

                yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

                if (bmsr & BMSR_LSTATUS)
                        next_tick = 60*HZ;
                else
                        next_tick = 3*HZ;
        }

        yp->timer.expires = jiffies + next_tick;
        add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;

        printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
                   "status %4.4x, Rx status %4.4x, resetting...\n",
                   dev->name, yp->cur_tx, yp->dirty_tx,
                   ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));

        /* Note: these should be KERN_DEBUG. */
        if (yellowfin_debug) {
                int i;
                printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_CONT " %8.8x",
                               yp->rx_ring[i].result_status);
                printk(KERN_CONT "\n");
                printk(KERN_WARNING "  Tx ring %p: ", yp->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_CONT " %4.4x /%8.8x",
                               yp->tx_status[i].tx_errs,
                               yp->tx_ring[i].result_status);
                printk(KERN_CONT "\n");
        }

        /* If the hardware is found to hang regularly, we will update the code
           to reinitialize the chip here. */
        dev->if_port = 0;

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);
        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_wake_queue (dev);         /* Typical path */

        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int i, j;

        yp->tx_full = 0;
        yp->cur_rx = yp->cur_tx = 0;
        yp->dirty_tx = 0;

        yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        for (i = 0; i < RX_RING_SIZE; i++) {
                yp->rx_ring[i].dbdma_cmd =
                        cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
                yp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                        skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
        }
        if (i != RX_RING_SIZE) {
                for (j = 0; j < i; j++)
                        dev_kfree_skb(yp->rx_skbuff[j]);
                return -ENOMEM;
        }
        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
        /* In this mode the Tx ring needs only a single descriptor. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                yp->tx_skbuff[i] = NULL;
                yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring */
        yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
        /* Tx ring needs a pair of descriptors, the second for the status. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                j = 2*i;
                yp->tx_skbuff[i] = NULL;
                /* Branch on Tx error. */
                yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        (j+1)*sizeof(struct yellowfin_desc));
                j++;
                if (yp->drv_flags & FullTxStatus) {
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
                        yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words));
                } else {
                        /* Symbios chips write only tx_errs word. */
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
                        yp->tx_ring[j].request_cnt = 2;
                        /* Om pade ummmmm... */
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words) +
                                &(yp->tx_status[0].tx_errs) -
                                &(yp->tx_status[0]));
                }
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring */
        yp->tx_ring[j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
        yp->tx_tail_desc = &yp->tx_status[0];
        return 0;
}

static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        unsigned entry;
        int len = skb->len;

        netif_stop_queue (dev);

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = yp->cur_tx % TX_RING_SIZE;

        if (gx_fix) {   /* Note: only works for paddable protocols e.g.  IP. */
                int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
                /* Fix GX chipset errata. */
                if (cacheline_end > 24  || cacheline_end == 0) {
                        len = skb->len + 32 - cacheline_end + 1;
                        if (skb_padto(skb, len)) {
                                yp->tx_skbuff[entry] = NULL;
                                netif_wake_queue(dev);
                                return NETDEV_TX_OK;
                        }
                }
        }
        yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
        yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                skb->data, len, PCI_DMA_TODEVICE));
        yp->tx_ring[entry].result_status = 0;
        if (entry >= TX_RING_SIZE-1) {
                /* New stop command. */
                yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
        } else {
                yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[entry].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
        }
        yp->cur_tx++;
#else
        yp->tx_ring[entry<<1].request_cnt = len;
        yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                skb->data, len, PCI_DMA_TODEVICE));
        /* The input_last (status-write) command is constant, but we must
           rewrite the subsequent 'stop' command. */

        yp->cur_tx++;
        {
                unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
                yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        }
        /* Final step -- overwrite the old 'stop' command. */

        yp->tx_ring[entry<<1].dbdma_cmd =
                cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
                                          CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

        /* Non-x86 Todo: explicitly flush cache lines here. */

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);

        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_start_queue (dev);                /* Typical path */
        else
                yp->tx_full = 1;

        if (yellowfin_debug > 4) {
                printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
                           dev->name, yp->cur_tx, entry);
        }
        return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct yellowfin_private *yp;
        void __iomem *ioaddr;
        int boguscnt = max_interrupt_work;
        unsigned int handled = 0;

        yp = netdev_priv(dev);
        ioaddr = yp->base;

        spin_lock (&yp->lock);

        do {
                u16 intr_status = ioread16(ioaddr + IntrClear);

                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
                                   dev->name, intr_status);

                if (intr_status == 0)
                        break;
                handled = 1;

                if (intr_status & (IntrRxDone | IntrEarlyRx)) {
                        yellowfin_rx(dev);
                        iowrite32(0x10001000, ioaddr + RxCtrl);         /* Wake Rx engine. */
                }

#ifdef NO_TXSTATS
                for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
                        int entry = yp->dirty_tx % TX_RING_SIZE;
                        struct sk_buff *skb;

                        if (yp->tx_ring[entry].result_status == 0)
                                break;
                        skb = yp->tx_skbuff[entry];
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                        /* Free the original skb. */
                        pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
                                skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_irq(skb);
                        yp->tx_skbuff[entry] = NULL;
                }
                if (yp->tx_full
                        && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
                        /* The ring is no longer full, clear tbusy. */
                        yp->tx_full = 0;
                        netif_wake_queue(dev);
                }
#else
                if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
                        unsigned dirty_tx = yp->dirty_tx;

                        for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
                                 dirty_tx++) {
                                /* Todo: optimize this. */
                                int entry = dirty_tx % TX_RING_SIZE;
                                u16 tx_errs = yp->tx_status[entry].tx_errs;
                                struct sk_buff *skb;

#ifndef final_version
                                if (yellowfin_debug > 5)
                                        printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
                                                   "%4.4x %4.4x %4.4x %4.4x.\n",
                                                   dev->name, entry,
                                                   yp->tx_status[entry].tx_cnt,
                                                   yp->tx_status[entry].tx_errs,
                                                   yp->tx_status[entry].total_tx_cnt,
                                                   yp->tx_status[entry].paused);
#endif
                                if (tx_errs == 0)
                                        break;  /* It still hasn't been Txed */
                                skb = yp->tx_skbuff[entry];
                                if (tx_errs & 0xF810) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (yellowfin_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
                                                           dev->name, tx_errs);
#endif
                                        dev->stats.tx_errors++;
                                        if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
                                        if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
                                        if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
                                        if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
                                } else {
#ifndef final_version
                                        if (yellowfin_debug > 4)
                                                printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
                                                           dev->name, tx_errs);
#endif
                                        dev->stats.tx_bytes += skb->len;
                                        dev->stats.collisions += tx_errs & 15;
                                        dev->stats.tx_packets++;
                                }
                                /* Free the original skb. */
                                pci_unmap_single(yp->pci_dev,
                                        le32_to_cpu(yp->tx_ring[entry<<1].addr), skb->len,
                                        PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq(skb);
                                yp->tx_skbuff[entry] = NULL;
                                /* Mark status as empty. */
                                yp->tx_status[entry].tx_errs = 0;
                        }

#ifndef final_version
                        if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                                           dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (yp->tx_full
                                && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
                                /* The ring is no longer full, clear tbusy. */
                                yp->tx_full = 0;
                                netif_wake_queue(dev);
                        }

                        yp->dirty_tx = dirty_tx;
                        yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
                }
#endif

                /* Log errors and other uncommon events. */
                if (intr_status & 0x2ee)        /* Abnormal error summary. */
                        yellowfin_error(dev, intr_status);

                if (--boguscnt < 0) {
                        printk(KERN_WARNING "%s: Too much work at interrupt, "
                                   "status=0x%4.4x.\n",
                                   dev->name, intr_status);
                        break;
                }
        } while (1);

        if (yellowfin_debug > 3)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
                           dev->name, ioread16(ioaddr + IntrStatus));

        spin_unlock (&yp->lock);
        return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int entry = yp->cur_rx % RX_RING_SIZE;
        int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

        if (yellowfin_debug > 4) {
                printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
                           entry, yp->rx_ring[entry].result_status);
                printk(KERN_DEBUG "   #%d desc. %8.8x %8.8x %8.8x.\n",
                           entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
                           yp->rx_ring[entry].result_status);
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (1) {
                struct yellowfin_desc *desc = &yp->rx_ring[entry];
                struct sk_buff *rx_skb = yp->rx_skbuff[entry];
                s16 frame_status;
                u16 desc_status;
                int data_size;
                u8 *buf_addr;

                if (!desc->result_status)
                        break;
                pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
                        yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                desc_status = le32_to_cpu(desc->result_status) >> 16;
                buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) -
                        le32_to_cpu(desc->result_status)) & 0xffff;
                frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
                                   frame_status);
                if (--boguscnt < 0)
                        break;
                if (!(desc_status & RX_EOP)) {
                        if (data_size != 0)
                                printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
                                           " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
                        dev->stats.rx_length_errors++;
                } else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
                        /* There was an error. */
                        if (yellowfin_debug > 3)
                                printk(KERN_DEBUG "  yellowfin_rx() Rx error was %4.4x.\n",
                                           frame_status);
                        dev->stats.rx_errors++;
                        if (frame_status & 0x0060) dev->stats.rx_length_errors++;
                        if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
                        if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
                        if (frame_status < 0) dev->stats.rx_dropped++;
                } else if (!(yp->drv_flags & IsGigabit)  &&
                                   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
                        u8 status1 = buf_addr[data_size-2];
                        u8 status2 = buf_addr[data_size-1];
                        dev->stats.rx_errors++;
                        if (status1 & 0xC0) dev->stats.rx_length_errors++;
                        if (status2 & 0x03) dev->stats.rx_frame_errors++;
                        if (status2 & 0x04) dev->stats.rx_crc_errors++;
                        if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE             /* Support for prototype hardware errata. */
                } else if ((yp->drv_flags & HasMACAddrBug)  &&
                        memcmp(le32_to_cpu(yp->rx_ring_dma +
                                entry*sizeof(struct yellowfin_desc)),
                                dev->dev_addr, 6) != 0 &&
                        memcmp(le32_to_cpu(yp->rx_ring_dma +
                                entry*sizeof(struct yellowfin_desc)),
                                "\377\377\377\377\377\377", 6) != 0) {
                        if (bogus_rx++ == 0)
                                printk(KERN_WARNING "%s: Bad frame to %pM\n",
                                           dev->name, buf_addr);
#endif
                } else {
                        struct sk_buff *skb;
                        int pkt_len = data_size -
                                (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
                        /* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
                        if (yellowfin_debug > 4)
                                printk(KERN_DEBUG "  yellowfin_rx() normal Rx pkt length %d"
                                           " of %d, bogus_cnt %d.\n",
                                           pkt_len, data_size, boguscnt);
#endif
                        /* Check if the packet is long enough to just pass up the skbuff
                           without copying to a properly sized skbuff. */
                        if (pkt_len > rx_copybreak) {
                                skb = rx_skb;
                                skb_put(skb, pkt_len);
                                pci_unmap_single(yp->pci_dev,
                                        le32_to_cpu(yp->rx_ring[entry].addr),
                                        yp->rx_buf_sz,
                                        PCI_DMA_FROMDEVICE);
                                yp->rx_skbuff[entry] = NULL;
                        } else {
                                skb = dev_alloc_skb(pkt_len + 2);
                                if (skb == NULL)
                                        break;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(yp->pci_dev,
                                                                le32_to_cpu(desc->addr),
                                                                yp->rx_buf_sz,
                                                                PCI_DMA_FROMDEVICE);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                entry = (++yp->cur_rx) % RX_RING_SIZE;
        }

        /* Refill the Rx ring buffers. */
        for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
                entry = yp->dirty_rx % RX_RING_SIZE;
                if (yp->rx_skbuff[entry] == NULL) {
                        struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
                        if (skb == NULL)
                                break;                          /* Better luck next round. */
                        yp->rx_skbuff[entry] = skb;
                        skb->dev = dev; /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                                skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                }
                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
                if (entry != 0)
                        yp->rx_ring[entry - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                else
                        yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
                                                        | yp->rx_buf_sz);
        }

        return 0;
}
1191
1192static void yellowfin_error(struct net_device *dev, int intr_status)
1193{
1194        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1195                   dev->name, intr_status);
1196        /* Hmmmmm, it's not clear what to do here. */
1197        if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1198                dev->stats.tx_errors++;
1199        if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1200                dev->stats.rx_errors++;
1201}
1202
1203static int yellowfin_close(struct net_device *dev)
1204{
1205        struct yellowfin_private *yp = netdev_priv(dev);
1206        void __iomem *ioaddr = yp->base;
1207        int i;
1208
1209        netif_stop_queue (dev);
1210
1211        if (yellowfin_debug > 1) {
1212                printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
1213                           "Rx %4.4x Int %2.2x.\n",
1214                           dev->name, ioread16(ioaddr + TxStatus),
1215                           ioread16(ioaddr + RxStatus),
1216                           ioread16(ioaddr + IntrStatus));
1217                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1218                           dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1219        }
1220
1221        /* Disable interrupts by clearing the interrupt mask. */
1222        iowrite16(0x0000, ioaddr + IntrEnb);
1223
1224        /* Stop the chip's Tx and Rx processes. */
1225        iowrite32(0x80000000, ioaddr + RxCtrl);
1226        iowrite32(0x80000000, ioaddr + TxCtrl);
1227
1228        del_timer(&yp->timer);
1229
#if defined(__i386__)
        if (yellowfin_debug > 2) {
                printk(KERN_DEBUG "  Tx ring at %8.8llx:\n",
                                (unsigned long long)yp->tx_ring_dma);
                for (i = 0; i < TX_RING_SIZE*2; i++)
                        printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
                                   ioread32(ioaddr + TxPtr) ==
                                   (u32)(yp->tx_ring_dma + i*sizeof(yp->tx_ring[0])) ? '>' : ' ',
                                   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
                                   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
                printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_DEBUG "   #%d status %4.4x %4.4x %4.4x %4.4x.\n",
                                   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
                                   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

                printk(KERN_DEBUG "  Rx ring %8.8llx:\n",
                                (unsigned long long)yp->rx_ring_dma);
                for (i = 0; i < RX_RING_SIZE; i++) {
                        printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
                                   ioread32(ioaddr + RxPtr) ==
                                   (u32)(yp->rx_ring_dma + i*sizeof(yp->rx_ring[0])) ? '>' : ' ',
                                   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
                                   yp->rx_ring[i].result_status);
                        if (yellowfin_debug > 6) {
                                /* Inspect the buffer through its kernel virtual
                                   address; the descriptor only holds the bus address. */
                                if (yp->rx_skbuff[i] &&
                                    get_unaligned((u8 *)yp->rx_skbuff[i]->data) != 0x69) {
                                        int j;
                                        for (j = 0; j < 0x50; j++)
                                                printk(" %4.4x",
                                                           get_unaligned(((u16 *)yp->rx_skbuff[i]->data) + j));
                                        printk("\n");
                                }
                        }
                }
        }
#endif /* __i386__ debugging only */

        free_irq(dev->irq, dev);

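        /* Only the socket buffers are released below; the descriptor rings
           themselves are consistent-DMA allocations that stay around until
           yellowfin_remove_one(), so a subsequent open simply reinitializes
           them in place. */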
        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
                if (yp->rx_skbuff[i]) {
                        /* Unmap before the descriptor address is poisoned. */
                        pci_unmap_single(yp->pci_dev,
                                le32_to_cpu(yp->rx_ring[i].addr),
                                yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(yp->rx_skbuff[i]);
                }
                yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                yp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (yp->tx_skbuff[i])
                        dev_kfree_skb(yp->tx_skbuff[i]);
                yp->tx_skbuff[i] = NULL;
        }

#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
        if (yellowfin_debug > 0) {
                printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
                           dev->name, bogus_rx);
        }
#endif

        return 0;
}

/* Set or clear the multicast filter for this adaptor. */

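/* Filter details, as implied by the code below: the chip hashes each
   multicast address to one bit of a 64-bit table, using bits 8:3 of the
   little-endian CRC-32 of the address; the table is written out as four
   16-bit HashTbl words.  Chips with HasMulticastBug also match on 3-,
   4- and 5-byte address prefixes, so those CRCs are folded in too. */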
static void set_rx_mode(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;
        u16 cfg_value = ioread16(ioaddr + Cnfg);

        /* Stop the Rx process to change any value. */
        iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                iowrite16(0x000F, ioaddr + AddrMode);
        } else if ((dev->mc_count > 64)  ||  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter well, or accept all multicasts. */
                iowrite16(0x000B, ioaddr + AddrMode);
        } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
                struct dev_mc_list *mclist;
                u16 hash_table[4];
                int i;
                memset(hash_table, 0, sizeof(hash_table));
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                         i++, mclist = mclist->next) {
                        unsigned int bit;

                        /* Due to a bug in the early chip versions, multiple filter
                           slots must be set for each address. */
                        if (yp->drv_flags & HasMulticastBug) {
                                bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                                bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                                bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                        }
                        bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
                        hash_table[bit >> 4] |= (1 << (bit & 15));
                }
                /* Copy the hash table to the chip. */
                for (i = 0; i < 4; i++)
                        iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
                iowrite16(0x0003, ioaddr + AddrMode);
        } else {                                        /* Normal, unicast/broadcast-only mode. */
                iowrite16(0x0001, ioaddr + AddrMode);
        }
        /* Restart the Rx process. */
        iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
}

static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct yellowfin_private *np = netdev_priv(dev);
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = yellowfin_get_drvinfo,
};
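/* These strings are what `ethtool -i <iface>` reports.  Since only
   get_drvinfo is implemented, other ethtool requests are rejected by
   the ethtool core with -EOPNOTSUPP. */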

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct yellowfin_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        struct mii_ioctl_data *data = if_mii(rq);

        switch (cmd) {
        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
                data->phy_id = np->phys[0] & 0x1f;
                /* Fall Through */

        case SIOCGMIIREG:               /* Read MII PHY register. */
                data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
                return 0;

        case SIOCSMIIREG:               /* Write MII PHY register. */
                if (data->phy_id == np->phys[0]) {
                        u16 value = data->val_in;
                        switch (data->reg_num) {
                        case 0:
                                /* Check for autonegotiation on or reset. */
                                np->medialock = (value & 0x9000) ? 0 : 1;
                                if (np->medialock)
                                        np->full_duplex = (value & 0x0100) ? 1 : 0;
                                break;
                        case 4: np->advertising = value; break;
                        }
                        /* Perhaps check_duplex(dev), depending on chip semantics. */
                }
                mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
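/* Userspace reaches these cases through the standard MII ioctls (e.g.
   the mii-tool utility).  Writes to PHY register 0 that clear both the
   reset and autonegotiation-enable bits lock the media selection, which
   is why medialock and full_duplex are updated above before the write
   is passed through to the PHY. */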
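/* Teardown order matters: unregister_netdev() below closes the
   interface if it is still up, so it must run before the descriptor
   rings' consistent DMA memory is returned; otherwise the chip could
   briefly be left chasing freed descriptors. */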
static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct yellowfin_private *np;

        BUG_ON(!dev);
        np = netdev_priv(dev);

        unregister_netdev (dev);

        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
                np->tx_status_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);

        pci_iounmap(pdev, np->base);

        pci_release_regions (pdev);

        free_netdev (dev);
        pci_set_drvdata(pdev, NULL);
}


static struct pci_driver yellowfin_driver = {
        .name           = DRV_NAME,
        .id_table       = yellowfin_pci_tbl,
        .probe          = yellowfin_init_one,
        .remove         = __devexit_p(yellowfin_remove_one),
};
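/* PCI core glue: the core calls yellowfin_init_one()/_remove_one() once
   per device matching yellowfin_pci_tbl, so the module init and exit
   handlers below only need to register and unregister this structure. */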
static int __init yellowfin_init (void)
{
/* When built as a module, this banner prints whether or not any devices
   are found during probe. */
#ifdef MODULE
        printk("%s", version);
#endif
        return pci_register_driver(&yellowfin_driver);
}


static void __exit yellowfin_cleanup (void)
{
        pci_unregister_driver (&yellowfin_driver);
}


module_init(yellowfin_init);
module_exit(yellowfin_cleanup);

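/* Typical module usage, assuming the usual module_param() hookups for
   the tunables declared near the top of the file (debug, mtu, gx_fix,
   and friends):

        modprobe yellowfin debug=2 gx_fix=1

   When built into the kernel the driver instead binds automatically as
   the PCI core enumerates devices matching yellowfin_pci_tbl. */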