linux/drivers/net/ethernet/packetengines/yellowfin.c
/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
        Written 1997-2001 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
        It also supports the Symbios Logic version of the same chip core.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/yellowfin.html
        [link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "yellowfin"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sep 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;                       /* Constrained by errata */
static int fifo_cfg = 0x0020;                           /* Bypass external Tx FIFO. */
#elif defined(YF_NEW)                                   /* A future perfect board :->.  */
static int dma_ctrl = 0x00CAC277;                       /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;                         /* Constrained by errata */
static const int fifo_cfg = 0x0020;                             /* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8                             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_SIZE   12              /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE    64
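/* The Tx ring uses two descriptors per entry (a data descriptor plus a
   status-write descriptor; see yellowfin_init_ring()), hence the factor of
   two in TX_TOTAL_SIZE below. */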
#define STATUS_TOTAL_SIZE       (TX_RING_SIZE * sizeof(struct tx_status_words))
#define TX_TOTAL_SIZE           (2 * TX_RING_SIZE * sizeof(struct yellowfin_desc))
#define RX_TOTAL_SIZE           (RX_RING_SIZE * sizeof(struct yellowfin_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer. */

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");

/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
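
As a concrete sketch, the wrap is plain modular arithmetic on the descriptor
index; yellowfin_init_ring() below links each descriptor to the next with

        rx_ring[i].branch_addr = cpu_to_le32(rx_ring_dma +
                ((i + 1) % RX_RING_SIZE) * sizeof(struct yellowfin_desc));

so the last entry's branch target is descriptor 0 again.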

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
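
In outline, the per-frame choice made in yellowfin_rx() below is simply

        if (pkt_len > rx_copybreak)
                pass the ring skbuff up the stack and replace it;
        else
                copy the frame into a freshly allocated small skbuff;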

IIIc. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
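
The bookkeeping invariant, visible in yellowfin_start_xmit() and in the
interrupt handler below, is that cur_tx - dirty_tx counts the in-flight Tx
entries; a rough sketch:

        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_start_queue(dev);         (still room)
        else
                yp->tx_full = 1;                (reap before queuing more)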

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/



enum capability_flags {
        HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
        HasMACAddrBug=32, /* Only on early revs.  */
        DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
        YELLOWFIN_SIZE  = 0x100,
};

struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
        {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
         FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
        {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
          HasMII | DontUseEeprom },
        { }
};

static const struct pci_device_id yellowfin_pci_tbl[] = {
        { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
        TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
        TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
        RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
        RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
        EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
        ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
        Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
        MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
        MII_Status=0xAE,
        RxDepth=0xB8, FlowCtrl=0xBC,
        AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
        EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
        EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
        __le32 dbdma_cmd;
        __le32 addr;
        __le32 branch_addr;
        __le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
        u16 tx_errs;
        u16 tx_cnt;
        u16 paused;
        u16 total_tx_cnt;
#else  /* Little endian chips. */
        u16 tx_cnt;
        u16 tx_errs;
        u16 total_tx_cnt;
        u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
        CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
        CMD_NOP=0x60000000, CMD_STOP=0x70000000,
        BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
        BRANCH_IFTRUE=0x040000,
};
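
/* A dbdma_cmd word packs one of the commands above, the modifier bits, and
   the buffer byte count in the low 16 bits.  For example,
   yellowfin_init_ring() below arms a receive descriptor with:

        desc->dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | rx_buf_sz);
*/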

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04, IntrRxPCIErr=0x08,
        IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40, IntrTxPCIErr=0x80,
        IntrEarlyRx=0x100, IntrWakeup=0x200,
};

#define PRIV_ALIGN      31      /* Required alignment mask */
#define MII_CNT         4
struct yellowfin_private {
        /* Descriptor rings first for alignment.
           Tx requires a second descriptor for status. */
        struct yellowfin_desc *rx_ring;
        struct yellowfin_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        struct tx_status_words *tx_status;
        dma_addr_t tx_status_dma;

        struct timer_list timer;        /* Media selection timer. */
        /* Frequently used and paired value: keep adjacent for cache effect. */
        int chip_id, drv_flags;
        struct pci_dev *pci_dev;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
        struct tx_status_words *tx_tail_desc;
        unsigned int cur_tx, dirty_tx;
        int tx_threshold;
        unsigned int tx_full:1;                         /* The Tx queue is full. */
        unsigned int full_duplex:1;                     /* Full-duplex operation requested. */
        unsigned int duplex_lock:1;
        unsigned int medialock:1;                       /* Do not sense media. */
        unsigned int default_port:4;            /* Last dev->if_port value. */
        /* MII transceiver section. */
        int mii_cnt;                                            /* Number of MII PHYs found. */
        u16 advertising;                                        /* NWay media advertisement */
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used */
        spinlock_t lock;
        void __iomem *base;
};

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
        .ndo_open               = yellowfin_open,
        .ndo_stop               = yellowfin_close,
        .ndo_start_xmit         = yellowfin_start_xmit,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_eth_ioctl          = netdev_ioctl,
        .ndo_tx_timeout         = yellowfin_tx_timeout,
};

static int yellowfin_init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct yellowfin_private *np;
        int irq;
        int chip_idx = ent->driver_data;
        static int find_cnt;
        void __iomem *ioaddr;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        int drv_flags = pci_id_tbl[chip_idx].drv_flags;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        i = pci_enable_device(pdev);
        if (i) return i;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        np = netdev_priv(dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_free_netdev;

        pci_set_master (pdev);

        ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
        if (!ioaddr)
                goto err_out_free_res;

        irq = pdev->irq;

        if (drv_flags & DontUseEeprom)
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
        else {
                int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
        }

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        pci_set_drvdata(pdev, dev);
        spin_lock_init(&np->lock);

        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->drv_flags = drv_flags;
        np->base = ioaddr;

        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                                        GFP_KERNEL);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                                        GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = ring_space;
        np->rx_ring_dma = ring_dma;

        ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
                                        &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_rx;
        np->tx_status = ring_space;
        np->tx_status_dma = ring_dma;

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->full_duplex = 1;
                np->default_port = option & 15;
                if (np->default_port)
                        np->medialock = 1;
        }
        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
                np->full_duplex = 1;

        if (np->full_duplex)
                np->duplex_lock = 1;

        /* The Yellowfin-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        if (mtu)
                dev->mtu = mtu;

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_status;

        netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
                    pci_id_tbl[chip_idx].name,
                    ioread32(ioaddr + ChipRev), ioaddr,
                    dev->dev_addr, irq);

        if (np->drv_flags & HasMII) {
                int phy, phy_idx = 0;
                for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(ioaddr, phy, 1);
                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->advertising = mdio_read(ioaddr, phy, 4);
                                netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
                                            phy, mii_status, np->advertising);
                        }
                }
                np->mii_cnt = phy_idx;
        }

        find_cnt++;

        return 0;

err_out_unmap_status:
        dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
                          np->tx_status_dma);
err_out_unmap_rx:
        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
                          np->rx_ring_dma);
err_out_unmap_tx:
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
                          np->tx_ring_dma);
err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev (dev);
        return -ENODEV;
}

static int read_eeprom(void __iomem *ioaddr, int location)
{
        int bogus_cnt = 10000;          /* Typical 33 MHz: 1050 ticks */

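        /* The low eight address bits go to EEAddr; the EECtrl write (read
           command 0x30, plus the high three address bits) starts the access,
           and completion is polled via the busy bit (0x80) in EEStatus. */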
        iowrite8(location, ioaddr + EEAddr);
        iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
        while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
                ;
        return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(1, ioaddr + MII_Cmd);
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
        return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
        int i;

        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
        iowrite16(value, ioaddr + MII_Wr_Data);

        /* Wait for the command to finish. */
        for (i = 10000; i >= 0; i--)
                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
                        break;
}


static int yellowfin_open(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        const int irq = yp->pci_dev->irq;
        void __iomem *ioaddr = yp->base;
        int i, rc;

        /* Reset the chip. */
        iowrite32(0x80000000, ioaddr + DMACtrl);

        rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;

        rc = yellowfin_init_ring(dev);
        if (rc < 0)
                goto err_free_irq;

        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

        /* Set up various condition 'select' registers.
           There are no options here. */
        iowrite32(0x00800080, ioaddr + TxIntrSel);      /* Interrupt on Tx abort */
        iowrite32(0x00800080, ioaddr + TxBranchSel);    /* Branch on Tx abort */
        iowrite32(0x00400040, ioaddr + TxWaitSel);      /* Wait on Tx status */
        iowrite32(0x00400040, ioaddr + RxIntrSel);      /* Interrupt on Rx done */
        iowrite32(0x00400040, ioaddr + RxBranchSel);    /* Branch on Rx error */
        iowrite32(0x00400040, ioaddr + RxWaitSel);      /* Wait on Rx done */

        /* Initialize other registers: with so many registers, this will
           eventually be converted to an offset/value list. */
        iowrite32(dma_ctrl, ioaddr + DMACtrl);
        iowrite16(fifo_cfg, ioaddr + FIFOcfg);
        /* Enable automatic generation of flow control frames, period 0xffff. */
        iowrite32(0x0030FFFF, ioaddr + FlowCtrl);

        yp->tx_threshold = 32;
        iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

        if (dev->if_port == 0)
                dev->if_port = yp->default_port;

        netif_start_queue(dev);

        /* Setting the Rx mode will start the Rx process. */
        if (yp->drv_flags & IsGigabit) {
                /* We are always in full-duplex mode with gigabit! */
                yp->full_duplex = 1;
                iowrite16(0x01CF, ioaddr + Cnfg);
        } else {
                iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
                iowrite16(0x1018, ioaddr + FrameGap1);
                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
        }
        set_rx_mode(dev);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(0x81ff, ioaddr + IntrEnb);                    /* See enum intr_status_bits */
        iowrite16(0x0000, ioaddr + EventStatus);                /* Clear non-interrupting events */
        iowrite32(0x80008000, ioaddr + RxCtrl);         /* Start Rx and Tx channels. */
        iowrite32(0x80008000, ioaddr + TxCtrl);

        if (yellowfin_debug > 2) {
                netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
        }

        /* Set the timer to check for link beat. */
        timer_setup(&yp->timer, yellowfin_timer, 0);
        yp->timer.expires = jiffies + 3*HZ;
        add_timer(&yp->timer);
out:
        return rc;

err_free_irq:
        free_irq(irq, dev);
        goto out;
}

static void yellowfin_timer(struct timer_list *t)
{
        struct yellowfin_private *yp = from_timer(yp, t, timer);
        struct net_device *dev = pci_get_drvdata(yp->pci_dev);
        void __iomem *ioaddr = yp->base;
        int next_tick = 60*HZ;

        if (yellowfin_debug > 3) {
                netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
                              ioread16(ioaddr + IntrStatus));
        }

        if (yp->mii_cnt) {
                int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
                int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
                int negotiated = lpa & yp->advertising;
                if (yellowfin_debug > 1)
                        netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
                                      yp->phys[0], bmsr, lpa);

                yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

                if (bmsr & BMSR_LSTATUS)
                        next_tick = 60*HZ;
                else
                        next_tick = 3*HZ;
        }

        yp->timer.expires = jiffies + next_tick;
        add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        void __iomem *ioaddr = yp->base;

        netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
                    yp->cur_tx, yp->dirty_tx,
                    ioread32(ioaddr + TxStatus),
                    ioread32(ioaddr + RxStatus));

        /* Note: these should be KERN_DEBUG. */
        if (yellowfin_debug) {
                int i;
                pr_warn("  Rx ring %p: ", yp->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        pr_cont(" %08x", yp->rx_ring[i].result_status);
                pr_cont("\n");
                pr_warn("  Tx ring %p: ", yp->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        pr_cont(" %04x /%08x",
                               yp->tx_status[i].tx_errs,
                               yp->tx_ring[i].result_status);
                pr_cont("\n");
        }

        /* If the hardware is found to hang regularly, we will update the code
           to reinitialize the chip here. */
        dev->if_port = 0;

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);
        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_wake_queue (dev);         /* Typical path */

        netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int i, j;

        yp->tx_full = 0;
        yp->cur_rx = yp->cur_tx = 0;
        yp->dirty_tx = 0;

        yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        for (i = 0; i < RX_RING_SIZE; i++) {
                yp->rx_ring[i].dbdma_cmd =
                        cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
                yp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                                                 skb->data,
                                                                 yp->rx_buf_sz,
                                                                 DMA_FROM_DEVICE));
        }
        if (i != RX_RING_SIZE) {
                for (j = 0; j < i; j++)
                        dev_kfree_skb(yp->rx_skbuff[j]);
                return -ENOMEM;
        }
        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
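        /* i == RX_RING_SIZE here (the error path above returns otherwise),
           so dirty_rx starts at 0. */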
        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
        /* In this mode the Tx ring needs only a single descriptor. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                yp->tx_skbuff[i] = NULL;
                yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring */
        yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
        /* Tx ring needs a pair of descriptors, the second for the status. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                j = 2*i;
                yp->tx_skbuff[i] = NULL;
                /* Branch on Tx error. */
                yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        (j+1)*sizeof(struct yellowfin_desc));
                j++;
                if (yp->drv_flags & FullTxStatus) {
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words));
                } else {
                        /* Symbios chips write only the tx_errs word. */
                        yp->tx_ring[j].dbdma_cmd =
                                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
                        /* Om pade ummmmm... */
                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                                i*sizeof(struct tx_status_words) +
                                offsetof(struct tx_status_words, tx_errs));
                }
                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
                        ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
        }
        /* Wrap ring: the final (status) descriptor is at index j. */
        yp->tx_ring[j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
        yp->tx_tail_desc = &yp->tx_status[0];
        return 0;
}

static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        unsigned entry;
        int len = skb->len;

        netif_stop_queue (dev);

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = yp->cur_tx % TX_RING_SIZE;

        if (gx_fix) {   /* Note: only works for paddable protocols e.g.  IP. */
                int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
                /* Fix GX chipset errata. */
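                /* A frame ending in the last 8 bytes of a 32-byte cache line
                   (cacheline_end of 25..31, or 0 meaning exactly on the
                   boundary) is padded so it ends at least one byte into the
                   next cache line. */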
                if (cacheline_end > 24  || cacheline_end == 0) {
                        len = skb->len + 32 - cacheline_end + 1;
                        if (skb_padto(skb, len)) {
                                yp->tx_skbuff[entry] = NULL;
                                netif_wake_queue(dev);
                                return NETDEV_TX_OK;
                        }
                }
        }
        yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
        yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                                             skb->data,
                                                             len, DMA_TO_DEVICE));
        yp->tx_ring[entry].result_status = 0;
        if (entry >= TX_RING_SIZE-1) {
                /* New stop command. */
                yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
        } else {
                yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->tx_ring[entry].dbdma_cmd =
                        cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
        }
        yp->cur_tx++;
#else
        yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                                                skb->data,
                                                                len, DMA_TO_DEVICE));
        /* The input_last (status-write) command is constant, but we must
           rewrite the subsequent 'stop' command. */

        yp->cur_tx++;
        {
                unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
                yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        }
        /* Final step -- overwrite the old 'stop' command. */

        yp->tx_ring[entry<<1].dbdma_cmd =
                cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
                                          CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

        /* Non-x86 Todo: explicitly flush cache lines here. */

        /* Wake the potentially-idle transmit channel. */
        iowrite32(0x10001000, yp->base + TxCtrl);

        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_start_queue (dev);                /* Typical path */
        else
                yp->tx_full = 1;

        if (yellowfin_debug > 4) {
                netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
                              yp->cur_tx, entry);
        }
        return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct yellowfin_private *yp;
        void __iomem *ioaddr;
        int boguscnt = max_interrupt_work;
        unsigned int handled = 0;

        yp = netdev_priv(dev);
        ioaddr = yp->base;

        spin_lock (&yp->lock);

        do {
                u16 intr_status = ioread16(ioaddr + IntrClear);

                if (yellowfin_debug > 4)
                        netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
                                      intr_status);

                if (intr_status == 0)
                        break;
                handled = 1;

                if (intr_status & (IntrRxDone | IntrEarlyRx)) {
                        yellowfin_rx(dev);
                        iowrite32(0x10001000, ioaddr + RxCtrl);         /* Wake Rx engine. */
                }

#ifdef NO_TXSTATS
                for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
                        int entry = yp->dirty_tx % TX_RING_SIZE;
                        struct sk_buff *skb;

                        if (yp->tx_ring[entry].result_status == 0)
                                break;
                        skb = yp->tx_skbuff[entry];
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                        /* Free the original skb. */
                        dma_unmap_single(&yp->pci_dev->dev,
                                         le32_to_cpu(yp->tx_ring[entry].addr),
                                         skb->len, DMA_TO_DEVICE);
                        dev_consume_skb_irq(skb);
                        yp->tx_skbuff[entry] = NULL;
                }
                if (yp->tx_full &&
                    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
                        /* The ring is no longer full, clear tbusy. */
                        yp->tx_full = 0;
                        netif_wake_queue(dev);
                }
#else
                if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
                        unsigned dirty_tx = yp->dirty_tx;

                        for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
                                 dirty_tx++) {
                                /* Todo: optimize this. */
                                int entry = dirty_tx % TX_RING_SIZE;
                                u16 tx_errs = yp->tx_status[entry].tx_errs;
                                struct sk_buff *skb;

#ifndef final_version
                                if (yellowfin_debug > 5)
                                        netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
                                                      entry,
                                                      yp->tx_status[entry].tx_cnt,
                                                      yp->tx_status[entry].tx_errs,
                                                      yp->tx_status[entry].total_tx_cnt,
                                                      yp->tx_status[entry].paused);
#endif
                                if (tx_errs == 0)
                                        break;  /* It still hasn't been Txed */
                                skb = yp->tx_skbuff[entry];
                                if (tx_errs & 0xF810) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (yellowfin_debug > 1)
                                                netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
                                                              tx_errs);
#endif
                                        dev->stats.tx_errors++;
                                        if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
                                        if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
                                        if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
                                        if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
                                } else {
#ifndef final_version
                                        if (yellowfin_debug > 4)
                                                netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
                                                              tx_errs);
#endif
                                        dev->stats.tx_bytes += skb->len;
                                        dev->stats.collisions += tx_errs & 15;
                                        dev->stats.tx_packets++;
                                }
                                /* Free the original skb. */
                                dma_unmap_single(&yp->pci_dev->dev,
                                                 le32_to_cpu(yp->tx_ring[entry << 1].addr),
                                                 skb->len, DMA_TO_DEVICE);
                                dev_consume_skb_irq(skb);
                                yp->tx_skbuff[entry] = NULL;
                                /* Mark status as empty. */
                                yp->tx_status[entry].tx_errs = 0;
                        }

#ifndef final_version
                        if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
                                           dirty_tx, yp->cur_tx, yp->tx_full);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (yp->tx_full &&
                            yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
                                /* The ring is no longer full, clear tbusy. */
                                yp->tx_full = 0;
                                netif_wake_queue(dev);
                        }

                        yp->dirty_tx = dirty_tx;
                        yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
                }
#endif

                /* Log errors and other uncommon events. */
                if (intr_status & 0x2ee)        /* Abnormal error summary. */
                        yellowfin_error(dev, intr_status);

                if (--boguscnt < 0) {
                        netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
                                    intr_status);
                        break;
                }
        } while (1);

        if (yellowfin_debug > 3)
                netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
                              ioread16(ioaddr + IntrStatus));

        spin_unlock (&yp->lock);
        return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
        struct yellowfin_private *yp = netdev_priv(dev);
        int entry = yp->cur_rx % RX_RING_SIZE;
        int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

        if (yellowfin_debug > 4) {
                printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
                           entry, yp->rx_ring[entry].result_status);
                printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
                           entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
                           yp->rx_ring[entry].result_status);
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (1) {
                struct yellowfin_desc *desc = &yp->rx_ring[entry];
                struct sk_buff *rx_skb = yp->rx_skbuff[entry];
                s16 frame_status;
                u16 desc_status;
                int data_size, __maybe_unused yf_size;
                u8 *buf_addr;

                if (!desc->result_status)
                        break;
                dma_sync_single_for_cpu(&yp->pci_dev->dev,
                                        le32_to_cpu(desc->addr),
                                        yp->rx_buf_sz, DMA_FROM_DEVICE);
                desc_status = le32_to_cpu(desc->result_status) >> 16;
                buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) -
                        le32_to_cpu(desc->result_status)) & 0xffff;
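                /* The low 16 bits of dbdma_cmd hold the requested byte count
                   and the low 16 bits of result_status hold the residual, so
                   the difference is the number of bytes actually received. */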
                frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "  %s() status was %04x\n",
                               __func__, frame_status);
                if (--boguscnt < 0)
                        break;

                yf_size = sizeof(struct yellowfin_desc);

                if (!(desc_status & RX_EOP)) {
                        if (data_size != 0)
                                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
                                            desc_status, data_size);
                        dev->stats.rx_length_errors++;
                } else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
                        /* There was an error. */
                        if (yellowfin_debug > 3)
                                printk(KERN_DEBUG "  %s() Rx error was %04x\n",
                                       __func__, frame_status);
                        dev->stats.rx_errors++;
                        if (frame_status & 0x0060) dev->stats.rx_length_errors++;
                        if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
                        if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
                        if (frame_status < 0) dev->stats.rx_dropped++;
                } else if (!(yp->drv_flags & IsGigabit)  &&
                                   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
                        u8 status1 = buf_addr[data_size-2];
                        u8 status2 = buf_addr[data_size-1];
                        dev->stats.rx_errors++;
                        if (status1 & 0xC0) dev->stats.rx_length_errors++;
                        if (status2 & 0x03) dev->stats.rx_frame_errors++;
                        if (status2 & 0x04) dev->stats.rx_crc_errors++;
                        if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE             /* Support for prototype hardware errata. */
                } else if ((yp->drv_flags & HasMACAddrBug)  &&
                        !ether_addr_equal(buf_addr, dev->dev_addr) &&
                        !ether_addr_equal(buf_addr,
                                          "\377\377\377\377\377\377")) {
                        if (bogus_rx++ == 0)
                                netdev_warn(dev, "Bad frame to %pM\n",
                                            buf_addr);
#endif
                } else {
                        struct sk_buff *skb;
                        int pkt_len = data_size -
                                (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
                        /* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
                        if (yellowfin_debug > 4)
                                printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
                                       __func__, pkt_len, data_size, boguscnt);
#endif
                        /* Check if the packet is long enough to just pass up the skbuff
                           without copying to a properly sized skbuff. */
                        if (pkt_len > rx_copybreak) {
                                skb = rx_skb;
                                skb_put(skb, pkt_len);
                                dma_unmap_single(&yp->pci_dev->dev,
                                                 le32_to_cpu(yp->rx_ring[entry].addr),
                                                 yp->rx_buf_sz,
                                                 DMA_FROM_DEVICE);
                                yp->rx_skbuff[entry] = NULL;
                        } else {
                                skb = netdev_alloc_skb(dev, pkt_len + 2);
                                if (skb == NULL)
                                        break;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                                skb_put(skb, pkt_len);
                                dma_sync_single_for_device(&yp->pci_dev->dev,
                                                           le32_to_cpu(desc->addr),
                                                           yp->rx_buf_sz,
                                                           DMA_FROM_DEVICE);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                entry = (++yp->cur_rx) % RX_RING_SIZE;
        }

        /* Refill the Rx ring buffers. */
        for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
                entry = yp->dirty_rx % RX_RING_SIZE;
                if (yp->rx_skbuff[entry] == NULL) {
                        struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
                        if (skb == NULL)
                                break;                          /* Better luck next round. */
                        yp->rx_skbuff[entry] = skb;
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                                                             skb->data,
                                                                             yp->rx_buf_sz,
                                                                             DMA_FROM_DEVICE));
                }
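                /* Rearm in two steps: mark this entry CMD_STOP first, then
                   re-enable the previous entry, so the chip always parks on a
                   stop descriptor and never runs into an unfilled buffer.  The
                   last ring entry also gets BRANCH_ALWAYS to wrap back to
                   descriptor 0. */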
                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
                if (entry != 0)
                        yp->rx_ring[entry - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
                else
                        yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
                                                        | yp->rx_buf_sz);
        }

        return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
        netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
        /* Hmmmmm, it's not clear what to do here. */
        if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
                dev->stats.tx_errors++;
        if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
                dev->stats.rx_errors++;
}
1197
1198static int yellowfin_close(struct net_device *dev)
1199{
1200        struct yellowfin_private *yp = netdev_priv(dev);
1201        void __iomem *ioaddr = yp->base;
1202        int i;
1203
1204        netif_stop_queue (dev);
1205
1206        if (yellowfin_debug > 1) {
1207                netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1208                              ioread16(ioaddr + TxStatus),
1209                              ioread16(ioaddr + RxStatus),
1210                              ioread16(ioaddr + IntrStatus));
1211                netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1212                              yp->cur_tx, yp->dirty_tx,
1213                              yp->cur_rx, yp->dirty_rx);
1214        }
1215
1216        /* Disable interrupts by clearing the interrupt mask. */
1217        iowrite16(0x0000, ioaddr + IntrEnb);
1218
1219        /* Stop the chip's Tx and Rx processes. */
1220        iowrite32(0x80000000, ioaddr + RxCtrl);
1221        iowrite32(0x80000000, ioaddr + TxCtrl);
1222
1223        del_timer(&yp->timer);
1224
1225#if defined(__i386__)
1226        if (yellowfin_debug > 2) {
1227                printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1228                                (unsigned long long)yp->tx_ring_dma);
1229                for (i = 0; i < TX_RING_SIZE*2; i++)
1230                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1231                                   ioread32(ioaddr + TxPtr) == (u32)(yp->tx_ring_dma + i*sizeof(yp->tx_ring[i])) ? '>' : ' ',
1232                                   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1233                                   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1234                printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1235                for (i = 0; i < TX_RING_SIZE; i++)
1236                        printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1237                                   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1238                                   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1239
1240                printk(KERN_DEBUG "  Rx ring at %08llx:\n",
1241                                (unsigned long long)yp->rx_ring_dma);
1242                for (i = 0; i < RX_RING_SIZE; i++) {
1243                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1244                                   ioread32(ioaddr + RxPtr) == (u32)(yp->rx_ring_dma + i*sizeof(yp->rx_ring[i])) ? '>' : ' ',
1245                                   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1246                                   yp->rx_ring[i].result_status);
1247                        /* dump via the skb's CPU pointer, not the ring's bus address */
1248                        if (yellowfin_debug > 6 && yp->rx_skbuff[i] &&
1249                            yp->rx_skbuff[i]->data[0] != 0x69) {
1250                                int j;
1251
1252                                printk(KERN_DEBUG);
1253                                for (j = 0; j < 0x50; j++)
1254                                        pr_cont(" %04x",
1255                                                get_unaligned((u16 *)yp->rx_skbuff[i]->data + j));
1256                                pr_cont("\n");
1257                        }
1258                }
1259        }
1260#endif /* __i386__ debugging only */
1261
1262        free_irq(yp->pci_dev->irq, dev);
1263
1264        /* Free all the skbuffs in the Rx queue. */
1265        for (i = 0; i < RX_RING_SIZE; i++) {
1266                yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1267                if (yp->rx_skbuff[i]) {
1268                        /* unmap with the still-valid bus address, then free */
1269                        dma_unmap_single(&yp->pci_dev->dev, le32_to_cpu(yp->rx_ring[i].addr),
1270                                         yp->rx_buf_sz, DMA_FROM_DEVICE);
1271                        dev_kfree_skb(yp->rx_skbuff[i]);
1272                }
1273                yp->rx_skbuff[i] = NULL;
1274                yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1275        }
1273        for (i = 0; i < TX_RING_SIZE; i++) {
1274                dev_kfree_skb(yp->tx_skbuff[i]);
1275                yp->tx_skbuff[i] = NULL;
1276        }
1277
1278#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
1279        if (yellowfin_debug > 0) {
1280                netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1281                              bogus_rx);
1282        }
1283#endif
1284
1285        return 0;
1286}
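
/* Editor's note: the close path above quiesces in order -- stop the queue,
 * mask interrupts, halt both DMA engines, stop the timer, release the IRQ --
 * before any buffer is freed, so no late handler can touch freed memory. */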
1287
1288/* Set or clear the multicast filter for this adaptor. */
1289
1290static void set_rx_mode(struct net_device *dev)
1291{
1292        struct yellowfin_private *yp = netdev_priv(dev);
1293        void __iomem *ioaddr = yp->base;
1294        u16 cfg_value = ioread16(ioaddr + Cnfg);
1295
1296        /* Stop the Rx process to change any value. */
1297        iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1298        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1299                iowrite16(0x000F, ioaddr + AddrMode);
1300        } else if ((netdev_mc_count(dev) > 64) ||
1301                   (dev->flags & IFF_ALLMULTI)) {
1302                /* Too many to filter well, or accept all multicasts. */
1303                iowrite16(0x000B, ioaddr + AddrMode);
1304        } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1305                struct netdev_hw_addr *ha;
1306                u16 hash_table[4];
1307                int i;
1308
1309                memset(hash_table, 0, sizeof(hash_table));
1310                netdev_for_each_mc_addr(ha, dev) {
1311                        unsigned int bit;
1312
1313                        /* Due to a bug in the early chip versions, multiple filter
1314                           slots must be set for each address. */
1315                        if (yp->drv_flags & HasMulticastBug) {
1316                                bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1317                                hash_table[bit >> 4] |= (1 << (bit & 15)); /* keep shift < 16 */
1318                                bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1319                                hash_table[bit >> 4] |= (1 << (bit & 15));
1320                                bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1321                                hash_table[bit >> 4] |= (1 << (bit & 15));
1322                        }
1323                        bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1324                        hash_table[bit >> 4] |= (1 << (bit & 15));
1325                }
1326                /* Copy the hash table to the chip. */
1327                for (i = 0; i < 4; i++)
1328                        iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1329                iowrite16(0x0003, ioaddr + AddrMode);
1330        } else {                                        /* Normal, unicast/broadcast-only mode. */
1331                iowrite16(0x0001, ioaddr + AddrMode);
1332        }
1333        /* Restart the Rx process. */
1334        iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1335}
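
/* Editor's sketch (not driver code): the chip's 64-bit multicast filter is
 * exposed as four 16-bit HashTbl registers, so each 6-bit hash value splits
 * into a word index (high 2 bits) and a bit index (low 4 bits).  Stand-alone
 * model; set_filter_bit() is a hypothetical name.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void set_filter_bit(uint16_t table[4], unsigned int hash6)
{
        table[hash6 >> 4] |= 1u << (hash6 & 15); /* word = hash6/16, bit = hash6%16 */
}

int main(void)
{
        uint16_t table[4] = { 0 };

        set_filter_bit(table, 0x25);             /* hash 37: word 2, bit 5 -> 0x0020 */
        printf("%04x %04x %04x %04x\n", table[0], table[1], table[2], table[3]);
        return 0;
}
#endif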
1336
1337static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1338{
1339        struct yellowfin_private *np = netdev_priv(dev);
1340
1341        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1342        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1343        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1344}
1345
1346static const struct ethtool_ops ethtool_ops = {
1347        .get_drvinfo = yellowfin_get_drvinfo
1348};
1349
1350static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1351{
1352        struct yellowfin_private *np = netdev_priv(dev);
1353        void __iomem *ioaddr = np->base;
1354        struct mii_ioctl_data *data = if_mii(rq);
1355
1356        switch (cmd) {
1357        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
1358                data->phy_id = np->phys[0] & 0x1f;
1359                fallthrough;
1360
1361        case SIOCGMIIREG:               /* Read MII PHY register. */
1362                data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1363                return 0;
1364
1365        case SIOCSMIIREG:               /* Write MII PHY register. */
1366                if (data->phy_id == np->phys[0]) {
1367                        u16 value = data->val_in;
1368                        switch (data->reg_num) {
1369                        case 0:
1370                                /* Check for autonegotiation on or reset. */
1371                                np->medialock = (value & 0x9000) ? 0 : 1;
1372                                if (np->medialock)
1373                                        np->full_duplex = (value & 0x0100) ? 1 : 0;
1374                                break;
1375                        case 4: np->advertising = value; break;
1376                        }
1377                        /* Perhaps check_duplex(dev), depending on chip semantics. */
1378                }
1379                mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1380                return 0;
1381        default:
1382                return -EOPNOTSUPP;
1383        }
1384}
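
/* Editor's sketch (not driver code): the SIOCSMIIREG case above keys off the
 * standard MII BMCR bits (see <linux/mii.h>): BMCR_RESET (0x8000) and
 * BMCR_ANENABLE (0x1000) clear the media lock, while BMCR_FULLDPLX (0x0100)
 * picks the forced duplex.  Stand-alone decode of the same test:
 */
#if 0
#include <stdio.h>

#define BMCR_FULLDPLX 0x0100
#define BMCR_ANENABLE 0x1000
#define BMCR_RESET    0x8000

int main(void)
{
        unsigned int value = 0x2100; /* example BMCR: forced 100 Mb/s, full duplex */
        int medialock = (value & (BMCR_RESET | BMCR_ANENABLE)) ? 0 : 1;

        printf("medialock=%d full_duplex=%d\n",
               medialock, medialock && (value & BMCR_FULLDPLX));
        return 0;
}
#endif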
1385
1386
1387static void yellowfin_remove_one(struct pci_dev *pdev)
1388{
1389        struct net_device *dev = pci_get_drvdata(pdev);
1390        struct yellowfin_private *np;
1391
1392        BUG_ON(!dev);
1393        np = netdev_priv(dev);
1394
1395        unregister_netdev(dev); /* may invoke yellowfin_close(); do it before freeing */
1396        dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
1397                          np->tx_status_dma);
1398        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
1399                          np->rx_ring_dma);
1400        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
1401                          np->tx_ring_dma);
1402
1403        pci_iounmap(pdev, np->base);
1404
1405        pci_release_regions(pdev);
1406
1407        free_netdev(dev);
1408}
1409
1410
1411static struct pci_driver yellowfin_driver = {
1412        .name           = DRV_NAME,
1413        .id_table       = yellowfin_pci_tbl,
1414        .probe          = yellowfin_init_one,
1415        .remove         = yellowfin_remove_one,
1416};
1417
1418
1419static int __init yellowfin_init(void)
1420{
1421/* When built as a module, this banner prints whether or not probe finds devices. */
1422#ifdef MODULE
1423        printk(version);
1424#endif
1425        return pci_register_driver(&yellowfin_driver);
1426}
1427
1428
1429static void __exit yellowfin_cleanup(void)
1430{
1431        pci_unregister_driver(&yellowfin_driver);
1432}
1433
1434
1435module_init(yellowfin_init);
1436module_exit(yellowfin_cleanup);
1437
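
/* Editor's note: built out of tree, this module needs only a one-line Kbuild
 * file ("obj-m += yellowfin.o") and loads with "modprobe yellowfin"; the
 * version banner in yellowfin_init() prints only for the modular build. */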