/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
        Written 1997-2001 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
        It also supports the Symbios Logic version of the same chip core.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/yellowfin.html
        [link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "yellowfin"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sep 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;                       /* Constrained by errata */
static int fifo_cfg = 0x0020;                           /* Bypass external Tx FIFO. */
#elif defined(YF_NEW)                                   /* A future perfect board :->.  */
static int dma_ctrl = 0x00CAC277;                       /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;                         /* Constrained by errata */
static const int fifo_cfg = 0x0020;                             /* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8                             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_SIZE   12              /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE    64
#define STATUS_TOTAL_SIZE       TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE           2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE           RX_RING_SIZE*sizeof(struct yellowfin_desc)
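/* TX_TOTAL_SIZE reserves 2*TX_RING_SIZE descriptors: in the full-status
   mode set up by yellowfin_init_ring() below, each Tx entry may use a
   descriptor pair, one for the packet and one for the status write-back. */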

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
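/* A typical module load supplying some of these parameters (hypothetical
   values, shown for illustration only; array parameters such as options
   and full_duplex take one comma-separated value per board):

        modprobe yellowfin debug=3 rx_copybreak=200 full_duplex=1,0
*/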

/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
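
As a simplified sketch of how the rings are closed (the real code is in
yellowfin_init_ring() below), each descriptor's branch_addr points at the
next descriptor, and the modulo wraps the final entry back to the head:

        for (i = 0; i < RX_RING_SIZE; i++)
                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                        ((i+1) % RX_RING_SIZE)*sizeof(struct yellowfin_desc));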

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
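
A simplified sketch of the per-frame copybreak decision (the real logic
is in yellowfin_rx() below):

        if (pkt_len > rx_copybreak) {
                skb = rx_skb;                   /* hand the full-size skbuff up */
                yp->rx_skbuff[entry] = NULL;    /* the ring slot is refilled later */
        } else {
                skb = netdev_alloc_skb(dev, pkt_len + 2);
                skb_reserve(skb, 2);            /* align the IP header */
                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
        }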

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
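
In index terms, cur_tx - dirty_tx is the number of descriptors still in
flight; a sketch of the handler's reclaim loop (simplified from
yellowfin_interrupt() below):

        for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
                int entry = yp->dirty_tx % TX_RING_SIZE;
                if (yp->tx_ring[entry].result_status == 0)
                        break;          /* not transmitted yet */
                /* ...record stats, then unmap and free the skb... */
        }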

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/



enum capability_flags {
        HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
        HasMACAddrBug=32, /* Only on early revs.  */
        DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
        YELLOWFIN_SIZE  = 0x100,
};

struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
        {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
         FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
        {"Symbios SYM53C885", { 0x07011000, 0xffffffff},
          HasMII | DontUseEeprom },
        { }
};

static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
        { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
        TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
        TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
        RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
        RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
        EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
        ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
        Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
        MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
        MII_Status=0xAE,
        RxDepth=0xB8, FlowCtrl=0xBC,
        AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
        EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
        EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
        __le32 dbdma_cmd;
        __le32 addr;
        __le32 branch_addr;
        __le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
        u16 tx_errs;
        u16 tx_cnt;
        u16 paused;
        u16 total_tx_cnt;
#else  /* Little endian chips. */
        u16 tx_cnt;
        u16 tx_errs;
        u16 total_tx_cnt;
        u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
        CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
        CMD_NOP=0x60000000, CMD_STOP=0x70000000,
        BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
        BRANCH_IFTRUE=0x040000,
};
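/* For example, the transmit path (yellowfin_start_xmit() below) composes a
   command word from an opcode, control bits and the 16-bit buffer length:
   cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len). */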

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
        IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
        IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN      31      /* Required alignment mask */
#define MII_CNT         4
struct yellowfin_private {
        /* Descriptor rings first for alignment.
           Tx requires a second descriptor for status. */
        struct yellowfin_desc *rx_ring;
        struct yellowfin_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        struct tx_status_words *tx_status;
        dma_addr_t tx_status_dma;

        struct timer_list timer;        /* Media selection timer. */
        /* Frequently used and paired value: keep adjacent for cache effect. */
        int chip_id, drv_flags;
        struct pci_dev *pci_dev;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
        struct tx_status_words *tx_tail_desc;
        unsigned int cur_tx, dirty_tx;
        int tx_threshold;
        unsigned int tx_full:1;                         /* The Tx queue is full. */
        unsigned int full_duplex:1;                     /* Full-duplex operation requested. */
        unsigned int duplex_lock:1;
        unsigned int medialock:1;                       /* Do not sense media. */
        unsigned int default_port:4;            /* Last dev->if_port value. */
        /* MII transceiver section. */
        int mii_cnt;                                            /* MII device addresses. */
        u16 advertising;                                        /* NWay media advertisement */
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used */
        spinlock_t lock;
        void __iomem *base;
};
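/* Indexing convention used throughout: cur_rx/cur_tx count entries ever
   produced, dirty_rx/dirty_tx entries ever consumed; their difference is
   the number in flight, and an index is reduced modulo the ring size only
   when a descriptor slot is actually touched. */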

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
        .ndo_open               = yellowfin_open,
        .ndo_stop               = yellowfin_close,
        .ndo_start_xmit         = yellowfin_start_xmit,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = yellowfin_tx_timeout,
};

static int yellowfin_init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct yellowfin_private *np;
        int irq;
        int chip_idx = ent->driver_data;
        static int find_cnt;
        void __iomem *ioaddr;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        int drv_flags = pci_id_tbl[chip_idx].drv_flags;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        i = pci_enable_device(pdev);
        if (i) return i;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        np = netdev_priv(dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_free_netdev;

        pci_set_master (pdev);

        ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
        if (!ioaddr)
                goto err_out_free_res;

        irq = pdev->irq;

        if (drv_flags & DontUseEeprom)
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
        else {
                int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
        }

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        pci_set_drvdata(pdev, dev);
        spin_lock_init(&np->lock);

        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->drv_flags = drv_flags;
        np->base = ioaddr;

        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = ring_space;
        np->rx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_rx;
        np->tx_status = ring_space;
        np->tx_status_dma = ring_dma;

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->full_duplex = 1;
                np->default_port = option & 15;
                if (np->default_port)
                        np->medialock = 1;
        }
        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
                np->full_duplex = 1;

        if (np->full_duplex)
                np->duplex_lock = 1;

        /* The Yellowfin-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
        dev->watchdog_timeo = TX_TIMEOUT;

        if (mtu)
                dev->mtu = mtu;

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_status;

        netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
                    pci_id_tbl[chip_idx].name,
                    ioread32(ioaddr + ChipRev), ioaddr,
                    dev->dev_addr, irq);

        if (np->drv_flags & HasMII) {
                int phy, phy_idx = 0;
                for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(ioaddr, phy, 1);
                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->advertising = mdio_read(ioaddr, phy, 4);
                                netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
                                            phy, mii_status, np->advertising);
                        }
                }
                np->mii_cnt = phy_idx;
        }

        find_cnt++;

        return 0;

err_out_unmap_status:
        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
                np->tx_status_dma);
err_out_unmap_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev (dev);
        return -ENODEV;
}

static int read_eeprom(void __iomem *ioaddr, int location)
{
        int bogus_cnt = 10000;          /* Typical 33 MHz: 1050 ticks */

        iowrite8(location, ioaddr + EEAddr);
        iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
        while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
                ;
        return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(1, ioaddr + MII_Cmd);
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
        return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(value, ioaddr + MII_Wr_Data);

        /* Wait for the command to finish. */
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
}
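/* Example use (cf. yellowfin_timer() below), assuming probe found an MII
   PHY (yp->mii_cnt > 0):

        int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
        if (bmsr & BMSR_LSTATUS)
                ... link is up ...
*/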


static int yellowfin_open(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        const int irq = yp->pci_dev->irq;
        void __iomem *ioaddr = yp->base;
        int i, rc;

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;

        rc = yellowfin_init_ring(dev);
        if (rc < 0)
                goto err_free_irq;

        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

        /* Set up various condition 'select' registers.
           There are no options here. */
        iowrite32(0x00800080, ioaddr + TxIntrSel);      /* Interrupt on Tx abort */
        iowrite32(0x00800080, ioaddr + TxBranchSel);    /* Branch on Tx abort */
        iowrite32(0x00400040, ioaddr + TxWaitSel);      /* Wait on Tx status */
        iowrite32(0x00400040, ioaddr + RxIntrSel);      /* Interrupt on Rx done */
        iowrite32(0x00400040, ioaddr + RxBranchSel);    /* Branch on Rx error */
        iowrite32(0x00400040, ioaddr + RxWaitSel);      /* Wait on Rx done */
        /* Initialize other registers: with so many, this will eventually be
           converted to an offset/value list. */
        iowrite32(dma_ctrl, ioaddr + DMACtrl);
        iowrite16(fifo_cfg, ioaddr + FIFOcfg);
        /* Enable automatic generation of flow control frames, period 0xffff. */
        iowrite32(0x0030FFFF, ioaddr + FlowCtrl);

        yp->tx_threshold = 32;
        iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

        if (dev->if_port == 0)
                dev->if_port = yp->default_port;

        netif_start_queue(dev);

        /* Setting the Rx mode will start the Rx process. */
        if (yp->drv_flags & IsGigabit) {
                /* We are always in full-duplex mode with gigabit! */
                yp->full_duplex = 1;
                iowrite16(0x01CF, ioaddr + Cnfg);
        } else {
                iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
                iowrite16(0x1018, ioaddr + FrameGap1);
                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
        }
        set_rx_mode(dev);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(0x81ff, ioaddr + IntrEnb);                    /* See enum intr_status_bits */
        iowrite16(0x0000, ioaddr + EventStatus);                /* Clear non-interrupting events */
        iowrite32(0x80008000, ioaddr + RxCtrl);         /* Start Rx and Tx channels. */
        iowrite32(0x80008000, ioaddr + TxCtrl);

        if (yellowfin_debug > 2) {
                netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
        }

        /* Set the timer to check for link beat. */
        init_timer(&yp->timer);
        yp->timer.expires = jiffies + 3*HZ;
        yp->timer.data = (unsigned long)dev;
        yp->timer.function = yellowfin_timer;                           /* timer handler */
        add_timer(&yp->timer);
out:
        return rc;

err_free_irq:
        free_irq(irq, dev);
        goto out;
}

static void yellowfin_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;
        int next_tick = 60*HZ;

        if (yellowfin_debug > 3) {
                netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
                              ioread16(ioaddr + IntrStatus));
        }

        if (yp->mii_cnt) {
                int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
                int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
                int negotiated = lpa & yp->advertising;
                if (yellowfin_debug > 1)
                        netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
                                      yp->phys[0], bmsr, lpa);

                yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

                if (bmsr & BMSR_LSTATUS)
                        next_tick = 60*HZ;
                else
                        next_tick = 3*HZ;
        }

        yp->timer.expires = jiffies + next_tick;
        add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;

        netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
                    yp->cur_tx, yp->dirty_tx,
                    ioread32(ioaddr + TxStatus),
                    ioread32(ioaddr + RxStatus));

        /* Note: these should be KERN_DEBUG. */
        if (yellowfin_debug) {
                int i;
                pr_warning("  Rx ring %p: ", yp->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        pr_cont(" %08x", yp->rx_ring[i].result_status);
                pr_cont("\n");
                pr_warning("  Tx ring %p: ", yp->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        pr_cont(" %04x /%08x",
                               yp->tx_status[i].tx_errs,
                               yp->tx_ring[i].result_status);
                pr_cont("\n");
        }

        /* If the hardware is found to hang regularly, we will update the code
           to reinitialize the chip here. */
        dev->if_port = 0;

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);
        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_wake_queue (dev);         /* Typical path */

        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int i, j;

        yp->tx_full = 0;
        yp->cur_rx = yp->cur_tx = 0;
        yp->dirty_tx = 0;

        yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        for (i = 0; i < RX_RING_SIZE; i++) {
                yp->rx_ring[i].dbdma_cmd =
                        cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
                yp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                        skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
        }
        if (i != RX_RING_SIZE) {
                for (j = 0; j < i; j++)
                        dev_kfree_skb(yp->rx_skbuff[j]);
                return -ENOMEM;
        }
        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
        /* In this mode the Tx ring needs only a single descriptor. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                yp->tx_skbuff[i] = NULL;
                yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring */
        yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
        /* Tx ring needs a pair of descriptors, the second for the status. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                j = 2*i;
                yp->tx_skbuff[i] = 0;
                /* Branch on Tx error. */
                yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        (j+1)*sizeof(struct yellowfin_desc));
                j++;
                if (yp->drv_flags & FullTxStatus) {
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
                        yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words));
                } else {
                        /* Symbios chips write only tx_errs word. */
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
                        yp->tx_ring[j].request_cnt = 2;
                        /* Om pade ummmmm... */
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words) +
                                &(yp->tx_status[0].tx_errs) -
                                &(yp->tx_status[0]));
                }
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring */
        yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
        yp->tx_tail_desc = &yp->tx_status[0];
        return 0;
}

static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        unsigned entry;
        int len = skb->len;

        netif_stop_queue (dev);

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = yp->cur_tx % TX_RING_SIZE;

        if (gx_fix) {   /* Note: only works for paddable protocols e.g.  IP. */
                int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
                /* Fix GX chipset errata. */
                if (cacheline_end > 24  || cacheline_end == 0) {
                        len = skb->len + 32 - cacheline_end + 1;
                        if (skb_padto(skb, len)) {
                                yp->tx_skbuff[entry] = NULL;
                                netif_wake_queue(dev);
                                return NETDEV_TX_OK;
                        }
                }
        }
        yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
        yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                skb->data, len, PCI_DMA_TODEVICE));
        yp->tx_ring[entry].result_status = 0;
        if (entry >= TX_RING_SIZE-1) {
                /* New stop command. */
                yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
        } else {
                yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[entry].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
        }
        yp->cur_tx++;
#else
        yp->tx_ring[entry<<1].request_cnt = len;
        yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                skb->data, len, PCI_DMA_TODEVICE));
        /* The input_last (status-write) command is constant, but we must
           rewrite the subsequent 'stop' command. */

        yp->cur_tx++;
        {
                unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
                yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        }
        /* Final step -- overwrite the old 'stop' command. */

        yp->tx_ring[entry<<1].dbdma_cmd =
                cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
                                          CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

        /* Non-x86 Todo: explicitly flush cache lines here. */

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);

        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_start_queue (dev);                /* Typical path */
        else
                yp->tx_full = 1;

        if (yellowfin_debug > 4) {
                netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
                              yp->cur_tx, entry);
        }
        return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct yellowfin_private *yp;
        void __iomem *ioaddr;
        int boguscnt = max_interrupt_work;
        unsigned int handled = 0;

        yp = netdev_priv(dev);
        ioaddr = yp->base;

        spin_lock (&yp->lock);

        do {
                u16 intr_status = ioread16(ioaddr + IntrClear);

                if (yellowfin_debug > 4)
                        netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
                                      intr_status);

                if (intr_status == 0)
                        break;
                handled = 1;

                if (intr_status & (IntrRxDone | IntrEarlyRx)) {
                        yellowfin_rx(dev);
                        iowrite32(0x10001000, ioaddr + RxCtrl);         /* Wake Rx engine. */
                }

#ifdef NO_TXSTATS
                for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
                        int entry = yp->dirty_tx % TX_RING_SIZE;
                        struct sk_buff *skb;

                        if (yp->tx_ring[entry].result_status == 0)
                                break;
                        skb = yp->tx_skbuff[entry];
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                        /* Free the original skb. */
                        pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
                                skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_irq(skb);
                        yp->tx_skbuff[entry] = NULL;
                }
                if (yp->tx_full &&
                    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
                        /* The ring is no longer full, clear tbusy. */
                        yp->tx_full = 0;
                        netif_wake_queue(dev);
                }
#else
                if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
                        unsigned dirty_tx = yp->dirty_tx;

                        for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
                                 dirty_tx++) {
                                /* Todo: optimize this. */
                                int entry = dirty_tx % TX_RING_SIZE;
                                u16 tx_errs = yp->tx_status[entry].tx_errs;
                                struct sk_buff *skb;

#ifndef final_version
                                if (yellowfin_debug > 5)
                                        netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
                                                      entry,
                                                      yp->tx_status[entry].tx_cnt,
                                                      yp->tx_status[entry].tx_errs,
                                                      yp->tx_status[entry].total_tx_cnt,
                                                      yp->tx_status[entry].paused);
#endif
                                if (tx_errs == 0)
                                        break;  /* It still hasn't been Txed */
                                skb = yp->tx_skbuff[entry];
                                if (tx_errs & 0xF810) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (yellowfin_debug > 1)
                                                netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
                                                              tx_errs);
#endif
                                        dev->stats.tx_errors++;
                                        if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
                                        if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
                                        if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
                                        if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
                                } else {
#ifndef final_version
                                        if (yellowfin_debug > 4)
                                                netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
                                                              tx_errs);
#endif
                                        dev->stats.tx_bytes += skb->len;
                                        dev->stats.collisions += tx_errs & 15;
                                        dev->stats.tx_packets++;
                                }
                                /* Free the original skb. */
                                pci_unmap_single(yp->pci_dev,
                                        yp->tx_ring[entry<<1].addr, skb->len,
                                        PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq(skb);
                                yp->tx_skbuff[entry] = 0;
                                /* Mark status as empty. */
                                yp->tx_status[entry].tx_errs = 0;
                        }

#ifndef final_version
                        if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
                                           dirty_tx, yp->cur_tx, yp->tx_full);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (yp->tx_full &&
                            yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
                                /* The ring is no longer full, clear tbusy. */
                                yp->tx_full = 0;
                                netif_wake_queue(dev);
                        }

                        yp->dirty_tx = dirty_tx;
                        yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
                }
#endif

                /* Log errors and other uncommon events. */
                if (intr_status & 0x2ee)        /* Abnormal error summary. */
                        yellowfin_error(dev, intr_status);

                if (--boguscnt < 0) {
                        netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
                                    intr_status);
                        break;
                }
        } while (1);

        if (yellowfin_debug > 3)
                netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
                              ioread16(ioaddr + IntrStatus));

        spin_unlock (&yp->lock);
        return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int entry = yp->cur_rx % RX_RING_SIZE;
        int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

        if (yellowfin_debug > 4) {
                printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
                           entry, yp->rx_ring[entry].result_status);
                printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
                           entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
                           yp->rx_ring[entry].result_status);
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (1) {
                struct yellowfin_desc *desc = &yp->rx_ring[entry];
                struct sk_buff *rx_skb = yp->rx_skbuff[entry];
                s16 frame_status;
                u16 desc_status;
                int data_size;
                u8 *buf_addr;

                if(!desc->result_status)
                        break;
                pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
                        yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                desc_status = le32_to_cpu(desc->result_status) >> 16;
                buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) -
                        le32_to_cpu(desc->result_status)) & 0xffff;
                frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "  %s() status was %04x\n",
                               __func__, frame_status);
                if (--boguscnt < 0)
                        break;
                if ( ! (desc_status & RX_EOP)) {
                        if (data_size != 0)
                                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
                                            desc_status, data_size);
                        dev->stats.rx_length_errors++;
                } else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
                        /* There was an error. */
                        if (yellowfin_debug > 3)
                                printk(KERN_DEBUG "  %s() Rx error was %04x\n",
                                       __func__, frame_status);
                        dev->stats.rx_errors++;
                        if (frame_status & 0x0060) dev->stats.rx_length_errors++;
                        if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
                        if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
                        if (frame_status < 0) dev->stats.rx_dropped++;
                } else if ( !(yp->drv_flags & IsGigabit)  &&
                                   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
                        u8 status1 = buf_addr[data_size-2];
                        u8 status2 = buf_addr[data_size-1];
                        dev->stats.rx_errors++;
                        if (status1 & 0xC0) dev->stats.rx_length_errors++;
                        if (status2 & 0x03) dev->stats.rx_frame_errors++;
                        if (status2 & 0x04) dev->stats.rx_crc_errors++;
                        if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE             /* Support for prototype hardware errata. */
                } else if ((yp->drv_flags & HasMACAddrBug)  &&
                        memcmp(le32_to_cpu(yp->rx_ring_dma +
                                entry*sizeof(struct yellowfin_desc)),
                                dev->dev_addr, 6) != 0 &&
                        memcmp(le32_to_cpu(yp->rx_ring_dma +
                                entry*sizeof(struct yellowfin_desc)),
                                "\377\377\377\377\377\377", 6) != 0) {
                        if (bogus_rx++ == 0)
                                netdev_warn(dev, "Bad frame to %pM\n",
                                            buf_addr);
#endif
                } else {
                        struct sk_buff *skb;
                        int pkt_len = data_size -
                                (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
                        /* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
                        if (yellowfin_debug > 4)
                                printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
                                       __func__, pkt_len, data_size, boguscnt);
#endif
                        /* Check if the packet is long enough to just pass up the skbuff
                           without copying to a properly sized skbuff. */
                        if (pkt_len > rx_copybreak) {
                                skb_put(skb = rx_skb, pkt_len);
                                pci_unmap_single(yp->pci_dev,
                                        le32_to_cpu(yp->rx_ring[entry].addr),
                                        yp->rx_buf_sz,
                                        PCI_DMA_FROMDEVICE);
                                yp->rx_skbuff[entry] = NULL;
                        } else {
                                skb = netdev_alloc_skb(dev, pkt_len + 2);
                                if (skb == NULL)
                                        break;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(yp->pci_dev,
                                                                le32_to_cpu(desc->addr),
                                                                yp->rx_buf_sz,
                                                                PCI_DMA_FROMDEVICE);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                entry = (++yp->cur_rx) % RX_RING_SIZE;
        }

        /* Refill the Rx ring buffers. */
        for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
                entry = yp->dirty_rx % RX_RING_SIZE;
                if (yp->rx_skbuff[entry] == NULL) {
                        struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
                        if (skb == NULL)
                                break;                          /* Better luck next round. */
                        yp->rx_skbuff[entry] = skb;
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                                skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                }
                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
                if (entry != 0)
                        yp->rx_ring[entry - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                else
                        yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
                                                        | yp->rx_buf_sz);
        }

        return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
        netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
        /* Hmmmmm, it's not clear what to do here. */
        if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
                dev->stats.tx_errors++;
        if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
                dev->stats.rx_errors++;
}
1185
1186static int yellowfin_close(struct net_device *dev)
1187{
1188        struct yellowfin_private *yp = netdev_priv(dev);
1189        void __iomem *ioaddr = yp->base;
1190        int i;
1191
1192        netif_stop_queue (dev);
1193
1194        if (yellowfin_debug > 1) {
1195                netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1196                              ioread16(ioaddr + TxStatus),
1197                              ioread16(ioaddr + RxStatus),
1198                              ioread16(ioaddr + IntrStatus));
1199                netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1200                              yp->cur_tx, yp->dirty_tx,
1201                              yp->cur_rx, yp->dirty_rx);
1202        }
1203
1204        /* Disable interrupts by clearing the interrupt mask. */
1205        iowrite16(0x0000, ioaddr + IntrEnb);
1206
1207        /* Stop the chip's Tx and Rx processes. */
1208        iowrite32(0x80000000, ioaddr + RxCtrl);
1209        iowrite32(0x80000000, ioaddr + TxCtrl);
1210
1211        del_timer(&yp->timer);
1212
1213#if defined(__i386__)
1214        if (yellowfin_debug > 2) {
1215                printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1216                                (unsigned long long)yp->tx_ring_dma);
1217                for (i = 0; i < TX_RING_SIZE*2; i++)
1218                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1219                                   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1220                                   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1221                                   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1222                printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1223                for (i = 0; i < TX_RING_SIZE; i++)
1224                        printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1225                                   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1226                                   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1227
1228                printk(KERN_DEBUG "  Rx ring %08llx:\n",
1229                                (unsigned long long)yp->rx_ring_dma);
1230                for (i = 0; i < RX_RING_SIZE; i++) {
1231                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1232                                   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1233                                   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1234                                   yp->rx_ring[i].result_status);
                        if (yellowfin_debug > 6) {
                                /* Peek at the buffer through its kernel
                                   virtual address; rx_ring[i].addr is a bus
                                   address and cannot be dereferenced. */
                                if (yp->rx_skbuff[i] &&
                                    get_unaligned((u8 *)yp->rx_skbuff[i]->data) != 0x69) {
                                        int j;

                                        printk(KERN_DEBUG);
                                        for (j = 0; j < 0x50; j++)
                                                pr_cont(" %04x",
                                                        get_unaligned(((u16 *)yp->rx_skbuff[i]->data) + j));
                                        pr_cont("\n");
                                }
                        }
1246                }
1247        }
1248#endif /* __i386__ debugging only */
1249
1250        free_irq(yp->pci_dev->irq, dev);
1251
1252        /* Free all the skbuffs in the Rx queue. */
1253        for (i = 0; i < RX_RING_SIZE; i++) {
1254                yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
                if (yp->rx_skbuff[i]) {
                        /* Release the streaming DMA mapping created when the
                           ring was filled, then free the buffer. */
                        pci_unmap_single(yp->pci_dev,
                                         le32_to_cpu(yp->rx_ring[i].addr),
                                         yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(yp->rx_skbuff[i]);
                }
                yp->rx_skbuff[i] = NULL;
                yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1260        }
1261        for (i = 0; i < TX_RING_SIZE; i++) {
1262                if (yp->tx_skbuff[i])
1263                        dev_kfree_skb(yp->tx_skbuff[i]);
1264                yp->tx_skbuff[i] = NULL;
1265        }
1266
1267#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
1268        if (yellowfin_debug > 0) {
1269                netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1270                              bogus_rx);
1271        }
1272#endif
1273
1274        return 0;
1275}
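
/* yellowfin_close() is invoked when the interface is brought down (it is
   registered as the stop hook in the driver's net_device ops elsewhere in
   this file), so the device is normally quiesced before remove time. */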
1276
1277/* Set or clear the multicast filter for this adaptor. */
1278
1279static void set_rx_mode(struct net_device *dev)
1280{
1281        struct yellowfin_private *yp = netdev_priv(dev);
1282        void __iomem *ioaddr = yp->base;
1283        u16 cfg_value = ioread16(ioaddr + Cnfg);
1284
1285        /* Stop the Rx process to change any value. */
1286        iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1287        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1288                iowrite16(0x000F, ioaddr + AddrMode);
1289        } else if ((netdev_mc_count(dev) > 64) ||
1290                   (dev->flags & IFF_ALLMULTI)) {
1291                /* Too many to filter well, or accept all multicasts. */
1292                iowrite16(0x000B, ioaddr + AddrMode);
1293        } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1294                struct netdev_hw_addr *ha;
1295                u16 hash_table[4];
1296                int i;
1297
1298                memset(hash_table, 0, sizeof(hash_table));
1299                netdev_for_each_mc_addr(ha, dev) {
1300                        unsigned int bit;
1301
                        /* Due to a bug in the early chip versions, multiple filter
                           slots must be set for each address.  The shift count is
                           masked to the 16-bit word width: shifting by the full
                           6-bit slot index would lose bits 16-63 of the table
                           (and a shift count >= 32 is undefined). */
                        if (yp->drv_flags & HasMulticastBug) {
                                bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                                bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                                bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
                                hash_table[bit >> 4] |= (1 << (bit & 15));
                        }
                        bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
                        hash_table[bit >> 4] |= (1 << (bit & 15));
1314                }
1315                /* Copy the hash table to the chip. */
1316                for (i = 0; i < 4; i++)
1317                        iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1318                iowrite16(0x0003, ioaddr + AddrMode);
1319        } else {                                        /* Normal, unicast/broadcast-only mode. */
1320                iowrite16(0x0001, ioaddr + AddrMode);
1321        }
1322        /* Restart the Rx process. */
1323        iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1324}
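
/* For reference, a stand-alone sketch of the hash lookup above (an
   illustration, not driver code): ether_crc_le() is the kernel's
   little-endian CRC-32, and bits 3..8 of the CRC select one of the 64
   filter slots, split into one of four 16-bit HashTbl words plus a bit
   within that word. */
#if 0   /* illustration only, never compiled */
#include <stdio.h>

/* Same algorithm as the kernel's ether_crc_le(). */
static unsigned int ether_crc_le(int length, const unsigned char *data)
{
        unsigned int crc = 0xffffffff;

        while (--length >= 0) {
                unsigned char octet = *data++;
                int bit;

                for (bit = 8; --bit >= 0; octet >>= 1) {
                        if ((crc ^ octet) & 1)
                                crc = (crc >> 1) ^ 0xedb88320;
                        else
                                crc >>= 1;
                }
        }
        return crc;
}

int main(void)
{
        /* A sample multicast address (224.0.0.1 mapped to Ethernet). */
        const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int slot = (ether_crc_le(6, mc) >> 3) & 0x3f;

        printf("slot %u -> word %u, mask 0x%04x\n",
               slot, slot >> 4, 1u << (slot & 15));
        return 0;
}
#endif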
1325
1326static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1327{
1328        struct yellowfin_private *np = netdev_priv(dev);
1329
1330        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1331        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1332        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1333}
1334
1335static const struct ethtool_ops ethtool_ops = {
1336        .get_drvinfo = yellowfin_get_drvinfo
1337};
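
/* These strings are what `ethtool -i <interface>' shows; that
   ETHTOOL_GDRVINFO request is routed to yellowfin_get_drvinfo() above. */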
1338
1339static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1340{
1341        struct yellowfin_private *np = netdev_priv(dev);
1342        void __iomem *ioaddr = np->base;
1343        struct mii_ioctl_data *data = if_mii(rq);
1344
1345        switch (cmd) {
1346        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
1347                data->phy_id = np->phys[0] & 0x1f;
1348                /* Fall Through */
1349
1350        case SIOCGMIIREG:               /* Read MII PHY register. */
1351                data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1352                return 0;
1353
1354        case SIOCSMIIREG:               /* Write MII PHY register. */
1355                if (data->phy_id == np->phys[0]) {
1356                        u16 value = data->val_in;
1357                        switch (data->reg_num) {
1358                        case 0:
1359                                /* Check for autonegotiation on or reset. */
1360                                np->medialock = (value & 0x9000) ? 0 : 1;
1361                                if (np->medialock)
1362                                        np->full_duplex = (value & 0x0100) ? 1 : 0;
1363                                break;
1364                        case 4: np->advertising = value; break;
1365                        }
1366                        /* Perhaps check_duplex(dev), depending on chip semantics. */
1367                }
1368                mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1369                return 0;
1370        default:
1371                return -EOPNOTSUPP;
1372        }
1373}
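
/* A sketch of how user space can reach the handler above (an illustration,
   not driver code; the interface name is a placeholder and error handling
   is minimal).  SIOCGMIIPHY returns the PHY address, which SIOCGMIIREG then
   uses to read a register -- here BMSR, the basic status register. */
#if 0   /* illustration only, never compiled */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        /* The kernel's if_mii() overlays mii_ioctl_data on the ifreq union;
           this is the matching user-space idiom. */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder name */

        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)   /* fills in mii->phy_id */
                return 1;
        mii->reg_num = MII_BMSR;                /* basic status register */
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                return 1;
        printf("PHY %d BMSR %04x\n", mii->phy_id, mii->val_out);
        close(fd);
        return 0;
}
#endif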
1374
1375
1376static void yellowfin_remove_one(struct pci_dev *pdev)
1377{
1378        struct net_device *dev = pci_get_drvdata(pdev);
1379        struct yellowfin_private *np;
1380
1381        BUG_ON(!dev);
1382        np = netdev_priv(dev);
1383
        /* Unregister first: if the interface were still up,
           unregister_netdev() would invoke yellowfin_close(), which walks
           the rings freed below. */
        unregister_netdev(dev);

        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
                np->tx_status_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1389
1390        pci_iounmap(pdev, np->base);
1391
1392        pci_release_regions(pdev);
1393
1394        free_netdev(dev);
1395        pci_set_drvdata(pdev, NULL);
1396}
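
/* This runs whenever the device is unbound from the driver: module unload,
   hot-unplug, or an explicit `echo 1 > /sys/bus/pci/devices/<BDF>/remove'
   (<BDF> standing in for the device's bus/device/function address). */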
1397
1398
1399static struct pci_driver yellowfin_driver = {
1400        .name           = DRV_NAME,
1401        .id_table       = yellowfin_pci_tbl,
1402        .probe          = yellowfin_init_one,
1403        .remove         = yellowfin_remove_one,
1404};
1405
1406
1407static int __init yellowfin_init(void)
1408{
1409/* when built as a module, this is printed whether or not devices are found in probe */
1410#ifdef MODULE
1411        printk(version);
1412#endif
1413        return pci_register_driver(&yellowfin_driver);
1414}
1415
1416
1417static void __exit yellowfin_cleanup(void)
1418{
1419        pci_unregister_driver(&yellowfin_driver);
1420}
1421
1422
1423module_init(yellowfin_init);
1424module_exit(yellowfin_cleanup);
1425
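
/* Usage sketch: with the module_param() hookups earlier in the file, the
   debug level consulted above (yellowfin_debug) can be raised at load time,
   e.g. `modprobe yellowfin debug=7'. */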