linux/drivers/net/ethernet/packetengines/yellowfin.c
   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3        Written 1997-2001 by Donald Becker.
   4
   5        This software may be used and distributed according to the terms of
   6        the GNU General Public License (GPL), incorporated herein by reference.
   7        Drivers based on or derived from this code fall under the GPL and must
   8        retain the authorship, copyright and license notice.  This file is not
   9        a complete program and may only be used when the entire operating
  10        system is licensed under the GPL.
  11
  12        This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13        It also supports the Symbios Logic version of the same chip core.
  14
  15        The author may be reached as becker@scyld.com, or C/O
  16        Scyld Computing Corporation
  17        410 Severn Ave., Suite 210
  18        Annapolis MD 21403
  19
  20        Support and updates available at
  21        http://www.scyld.com/network/yellowfin.html
  22        [link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME        "yellowfin"
  29#define DRV_VERSION     "2.1"
  30#define DRV_RELDATE     "Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263;                       /* Constrained by errata */
  43static int fifo_cfg = 0x0020;                           /* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)                                   /* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;                       /* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263;                         /* Constrained by errata */
  49static const int fifo_cfg = 0x0020;                             /* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8                             /* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE    16
  74#define TX_QUEUE_SIZE   12              /* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE    64
   76#define STATUS_TOTAL_SIZE       (TX_RING_SIZE*sizeof(struct tx_status_words))
   77#define TX_TOTAL_SIZE           (2*TX_RING_SIZE*sizeof(struct yellowfin_desc))
   78#define RX_TOTAL_SIZE           (RX_RING_SIZE*sizeof(struct yellowfin_desc))
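/* With power-of-two sizes the ring wrap arithmetic used throughout this
   file, e.g.
        entry = yp->cur_tx % TX_RING_SIZE;
   reduces to a single bitwise AND with (TX_RING_SIZE - 1). */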
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <asm/uaccess.h>
 104#include <asm/processor.h>              /* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133                                Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
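
For example, yellowfin_init_ring() below links each Rx descriptor to its
successor, the index arithmetic wrapping the last entry back to the first:

        yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
                ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));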
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
  166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
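
In outline, the Rx path later in this file applies the breakpoint as
follows (simplified from yellowfin_rx()):

        if (pkt_len > rx_copybreak)
                skb = rx_skb;
        else {
                skb = netdev_alloc_skb(dev, pkt_len + 2);
                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
        }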
 172
  173IIIc. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
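
Concretely, yellowfin_start_xmit() ends with (simplified):

        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
                netif_start_queue(dev);
        else
                yp->tx_full = 1;

and the interrupt handler issues the matching netif_wake_queue() once
enough Tx descriptors have been reclaimed.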
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
  194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212        HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213        HasMACAddrBug=32, /* Only on early revs.  */
  214        DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219        YELLOWFIN_SIZE  = 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232        {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233         FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
  234        {"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235          HasMII | DontUseEeprom },
 236        { }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240        { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241        { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242        { }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249        TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250        TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251        RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252        RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253        EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254        ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255        Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256        MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257        MII_Status=0xAE,
 258        RxDepth=0xB8, FlowCtrl=0xBC,
 259        AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260        EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261        EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267        __le32 dbdma_cmd;
 268        __le32 addr;
 269        __le32 branch_addr;
 270        __le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275        u16 tx_errs;
 276        u16 tx_cnt;
 277        u16 paused;
 278        u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280        u16 tx_cnt;
 281        u16 tx_errs;
 282        u16 total_tx_cnt;
 283        u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
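/* Note: the chip writes this status block as little-endian 32-bit words,
   which is why the 16-bit halves of each word are declared in opposite
   order on big-endian hosts. */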
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289        CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290        CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291        BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292        BRANCH_IFTRUE=0x040000,
 293};
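/* A dbdma_cmd word combines one CMD_* opcode, optional INTR/BRANCH/WAIT
   control bits and a 16-bit byte count, e.g. the Rx setup used in
   yellowfin_init_ring():
        cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz)
*/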
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
  300        IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04, IntrRxPCIErr=0x08,
  301        IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40, IntrTxPCIErr=0x80,
 302        IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN      31      /* Required alignment mask */
 305#define MII_CNT         4
 306struct yellowfin_private {
 307        /* Descriptor rings first for alignment.
 308           Tx requires a second descriptor for status. */
 309        struct yellowfin_desc *rx_ring;
 310        struct yellowfin_desc *tx_ring;
 311        struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312        struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313        dma_addr_t rx_ring_dma;
 314        dma_addr_t tx_ring_dma;
 315
 316        struct tx_status_words *tx_status;
 317        dma_addr_t tx_status_dma;
 318
 319        struct timer_list timer;        /* Media selection timer. */
 320        /* Frequently used and paired value: keep adjacent for cache effect. */
 321        int chip_id, drv_flags;
 322        struct pci_dev *pci_dev;
 323        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
 324        unsigned int rx_buf_sz;                         /* Based on MTU+slack. */
 325        struct tx_status_words *tx_tail_desc;
 326        unsigned int cur_tx, dirty_tx;
 327        int tx_threshold;
 328        unsigned int tx_full:1;                         /* The Tx queue is full. */
 329        unsigned int full_duplex:1;                     /* Full-duplex operation requested. */
 330        unsigned int duplex_lock:1;
 331        unsigned int medialock:1;                       /* Do not sense media. */
 332        unsigned int default_port:4;            /* Last dev->if_port value. */
 333        /* MII transceiver section. */
 334        int mii_cnt;                                            /* MII device addresses. */
 335        u16 advertising;                                        /* NWay media advertisement */
 336        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used */
 337        spinlock_t lock;
 338        void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(unsigned long data);
 347static void yellowfin_tx_timeout(struct net_device *dev);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350                                        struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359        .ndo_open               = yellowfin_open,
 360        .ndo_stop               = yellowfin_close,
 361        .ndo_start_xmit         = yellowfin_start_xmit,
 362        .ndo_set_rx_mode        = set_rx_mode,
 363        .ndo_change_mtu         = eth_change_mtu,
 364        .ndo_validate_addr      = eth_validate_addr,
 365        .ndo_set_mac_address    = eth_mac_addr,
 366        .ndo_do_ioctl           = netdev_ioctl,
 367        .ndo_tx_timeout         = yellowfin_tx_timeout,
 368};
 369
 370static int yellowfin_init_one(struct pci_dev *pdev,
 371                              const struct pci_device_id *ent)
 372{
 373        struct net_device *dev;
 374        struct yellowfin_private *np;
 375        int irq;
 376        int chip_idx = ent->driver_data;
 377        static int find_cnt;
 378        void __iomem *ioaddr;
 379        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 380        int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 381        void *ring_space;
 382        dma_addr_t ring_dma;
 383#ifdef USE_IO_OPS
 384        int bar = 0;
 385#else
 386        int bar = 1;
 387#endif
 388
 389/* when built into the kernel, we only print version if device is found */
 390#ifndef MODULE
 391        static int printed_version;
 392        if (!printed_version++)
 393                printk(version);
 394#endif
 395
 396        i = pci_enable_device(pdev);
 397        if (i) return i;
 398
 399        dev = alloc_etherdev(sizeof(*np));
 400        if (!dev)
 401                return -ENOMEM;
 402
 403        SET_NETDEV_DEV(dev, &pdev->dev);
 404
 405        np = netdev_priv(dev);
 406
 407        if (pci_request_regions(pdev, DRV_NAME))
 408                goto err_out_free_netdev;
 409
 410        pci_set_master (pdev);
 411
 412        ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 413        if (!ioaddr)
 414                goto err_out_free_res;
 415
 416        irq = pdev->irq;
 417
 418        if (drv_flags & DontUseEeprom)
 419                for (i = 0; i < 6; i++)
 420                        dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 421        else {
 422                int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 423                for (i = 0; i < 6; i++)
 424                        dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 425        }
 426
 427        /* Reset the chip. */
 428        iowrite32(0x80000000, ioaddr + DMACtrl);
 429
 430        pci_set_drvdata(pdev, dev);
 431        spin_lock_init(&np->lock);
 432
 433        np->pci_dev = pdev;
 434        np->chip_id = chip_idx;
 435        np->drv_flags = drv_flags;
 436        np->base = ioaddr;
 437
 438        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 439        if (!ring_space)
 440                goto err_out_cleardev;
 441        np->tx_ring = ring_space;
 442        np->tx_ring_dma = ring_dma;
 443
 444        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 445        if (!ring_space)
 446                goto err_out_unmap_tx;
 447        np->rx_ring = ring_space;
 448        np->rx_ring_dma = ring_dma;
 449
 450        ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
 451        if (!ring_space)
 452                goto err_out_unmap_rx;
 453        np->tx_status = ring_space;
 454        np->tx_status_dma = ring_dma;
 455
 456        if (dev->mem_start)
 457                option = dev->mem_start;
 458
 459        /* The lower four bits are the media type. */
 460        if (option > 0) {
 461                if (option & 0x200)
 462                        np->full_duplex = 1;
 463                np->default_port = option & 15;
 464                if (np->default_port)
 465                        np->medialock = 1;
 466        }
 467        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 468                np->full_duplex = 1;
 469
 470        if (np->full_duplex)
 471                np->duplex_lock = 1;
 472
 473        /* The Yellowfin-specific entries in the device structure. */
 474        dev->netdev_ops = &netdev_ops;
 475        dev->ethtool_ops = &ethtool_ops;
 476        dev->watchdog_timeo = TX_TIMEOUT;
 477
 478        if (mtu)
 479                dev->mtu = mtu;
 480
 481        i = register_netdev(dev);
 482        if (i)
 483                goto err_out_unmap_status;
 484
 485        netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 486                    pci_id_tbl[chip_idx].name,
 487                    ioread32(ioaddr + ChipRev), ioaddr,
 488                    dev->dev_addr, irq);
 489
 490        if (np->drv_flags & HasMII) {
 491                int phy, phy_idx = 0;
 492                for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 493                        int mii_status = mdio_read(ioaddr, phy, 1);
 494                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 495                                np->phys[phy_idx++] = phy;
 496                                np->advertising = mdio_read(ioaddr, phy, 4);
 497                                netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 498                                            phy, mii_status, np->advertising);
 499                        }
 500                }
 501                np->mii_cnt = phy_idx;
 502        }
 503
 504        find_cnt++;
 505
 506        return 0;
 507
 508err_out_unmap_status:
 509        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
 510                np->tx_status_dma);
 511err_out_unmap_rx:
 512        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 513err_out_unmap_tx:
 514        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 515err_out_cleardev:
 516        pci_iounmap(pdev, ioaddr);
 517err_out_free_res:
 518        pci_release_regions(pdev);
 519err_out_free_netdev:
 520        free_netdev (dev);
 521        return -ENODEV;
 522}
 523
 524static int read_eeprom(void __iomem *ioaddr, int location)
 525{
  526        int bogus_cnt = 10000;          /* Typical 33 MHz: 1050 ticks */
 527
 528        iowrite8(location, ioaddr + EEAddr);
 529        iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 530        while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 531                ;
 532        return ioread8(ioaddr + EERead);
 533}
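/* Callers use read_eeprom() both to probe and to fetch the station
   address; yellowfin_init_one() above first checks offset 6 to decide
   which half of the EEPROM holds the MAC:
        int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
*/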
 534
  535/* MII Management Data I/O accesses.
 536   These routines assume the MDIO controller is idle, and do not exit until
 537   the command is finished. */
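/* Typical use, as in yellowfin_timer() below:
        int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
*/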
 538
 539static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 540{
 541        int i;
 542
 543        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 544        iowrite16(1, ioaddr + MII_Cmd);
 545        for (i = 10000; i >= 0; i--)
 546                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 547                        break;
 548        return ioread16(ioaddr + MII_Rd_Data);
 549}
 550
 551static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 552{
 553        int i;
 554
 555        iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 556        iowrite16(value, ioaddr + MII_Wr_Data);
 557
 558        /* Wait for the command to finish. */
 559        for (i = 10000; i >= 0; i--)
 560                if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 561                        break;
 562}
 563
 564
 565static int yellowfin_open(struct net_device *dev)
 566{
 567        struct yellowfin_private *yp = netdev_priv(dev);
 568        const int irq = yp->pci_dev->irq;
 569        void __iomem *ioaddr = yp->base;
 570        int i, rc;
 571
 572        /* Reset the chip. */
 573        iowrite32(0x80000000, ioaddr + DMACtrl);
 574
 575        rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 576        if (rc)
 577                return rc;
 578
 579        rc = yellowfin_init_ring(dev);
 580        if (rc < 0)
 581                goto err_free_irq;
 582
 583        iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 584        iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 585
 586        for (i = 0; i < 6; i++)
 587                iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 588
 589        /* Set up various condition 'select' registers.
 590           There are no options here. */
 591        iowrite32(0x00800080, ioaddr + TxIntrSel);      /* Interrupt on Tx abort */
 592        iowrite32(0x00800080, ioaddr + TxBranchSel);    /* Branch on Tx abort */
 593        iowrite32(0x00400040, ioaddr + TxWaitSel);      /* Wait on Tx status */
 594        iowrite32(0x00400040, ioaddr + RxIntrSel);      /* Interrupt on Rx done */
 595        iowrite32(0x00400040, ioaddr + RxBranchSel);    /* Branch on Rx error */
 596        iowrite32(0x00400040, ioaddr + RxWaitSel);      /* Wait on Rx done */
 597
  598        /* Initialize other registers: with so many, this will eventually be
  599           converted to an offset/value list. */
 600        iowrite32(dma_ctrl, ioaddr + DMACtrl);
 601        iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 602        /* Enable automatic generation of flow control frames, period 0xffff. */
 603        iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 604
 605        yp->tx_threshold = 32;
 606        iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 607
 608        if (dev->if_port == 0)
 609                dev->if_port = yp->default_port;
 610
 611        netif_start_queue(dev);
 612
 613        /* Setting the Rx mode will start the Rx process. */
 614        if (yp->drv_flags & IsGigabit) {
 615                /* We are always in full-duplex mode with gigabit! */
 616                yp->full_duplex = 1;
 617                iowrite16(0x01CF, ioaddr + Cnfg);
 618        } else {
 619                iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 620                iowrite16(0x1018, ioaddr + FrameGap1);
 621                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 622        }
 623        set_rx_mode(dev);
 624
 625        /* Enable interrupts by setting the interrupt mask. */
 626        iowrite16(0x81ff, ioaddr + IntrEnb);                    /* See enum intr_status_bits */
 627        iowrite16(0x0000, ioaddr + EventStatus);                /* Clear non-interrupting events */
 628        iowrite32(0x80008000, ioaddr + RxCtrl);         /* Start Rx and Tx channels. */
 629        iowrite32(0x80008000, ioaddr + TxCtrl);
 630
 631        if (yellowfin_debug > 2) {
 632                netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 633        }
 634
 635        /* Set the timer to check for link beat. */
 636        init_timer(&yp->timer);
 637        yp->timer.expires = jiffies + 3*HZ;
 638        yp->timer.data = (unsigned long)dev;
 639        yp->timer.function = yellowfin_timer;                           /* timer handler */
 640        add_timer(&yp->timer);
 641out:
 642        return rc;
 643
 644err_free_irq:
 645        free_irq(irq, dev);
 646        goto out;
 647}
 648
 649static void yellowfin_timer(unsigned long data)
 650{
 651        struct net_device *dev = (struct net_device *)data;
 652        struct yellowfin_private *yp = netdev_priv(dev);
 653        void __iomem *ioaddr = yp->base;
 654        int next_tick = 60*HZ;
 655
 656        if (yellowfin_debug > 3) {
 657                netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 658                              ioread16(ioaddr + IntrStatus));
 659        }
 660
 661        if (yp->mii_cnt) {
 662                int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 663                int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 664                int negotiated = lpa & yp->advertising;
 665                if (yellowfin_debug > 1)
 666                        netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 667                                      yp->phys[0], bmsr, lpa);
 668
 669                yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 670
 671                iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 672
 673                if (bmsr & BMSR_LSTATUS)
 674                        next_tick = 60*HZ;
 675                else
 676                        next_tick = 3*HZ;
 677        }
 678
 679        yp->timer.expires = jiffies + next_tick;
 680        add_timer(&yp->timer);
 681}
 682
 683static void yellowfin_tx_timeout(struct net_device *dev)
 684{
 685        struct yellowfin_private *yp = netdev_priv(dev);
 686        void __iomem *ioaddr = yp->base;
 687
 688        netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 689                    yp->cur_tx, yp->dirty_tx,
 690                    ioread32(ioaddr + TxStatus),
 691                    ioread32(ioaddr + RxStatus));
 692
 693        /* Note: these should be KERN_DEBUG. */
 694        if (yellowfin_debug) {
 695                int i;
 696                pr_warn("  Rx ring %p: ", yp->rx_ring);
 697                for (i = 0; i < RX_RING_SIZE; i++)
 698                        pr_cont(" %08x", yp->rx_ring[i].result_status);
 699                pr_cont("\n");
 700                pr_warn("  Tx ring %p: ", yp->tx_ring);
 701                for (i = 0; i < TX_RING_SIZE; i++)
 702                        pr_cont(" %04x /%08x",
 703                               yp->tx_status[i].tx_errs,
 704                               yp->tx_ring[i].result_status);
 705                pr_cont("\n");
 706        }
 707
 708        /* If the hardware is found to hang regularly, we will update the code
 709           to reinitialize the chip here. */
 710        dev->if_port = 0;
 711
 712        /* Wake the potentially-idle transmit channel. */
 713        iowrite32(0x10001000, yp->base + TxCtrl);
 714        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 715                netif_wake_queue (dev);         /* Typical path */
 716
 717        dev->trans_start = jiffies; /* prevent tx timeout */
 718        dev->stats.tx_errors++;
 719}
 720
 721/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 722static int yellowfin_init_ring(struct net_device *dev)
 723{
 724        struct yellowfin_private *yp = netdev_priv(dev);
 725        int i, j;
 726
 727        yp->tx_full = 0;
 728        yp->cur_rx = yp->cur_tx = 0;
 729        yp->dirty_tx = 0;
 730
 731        yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 732
 733        for (i = 0; i < RX_RING_SIZE; i++) {
 734                yp->rx_ring[i].dbdma_cmd =
 735                        cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 736                yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 737                        ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 738        }
 739
 740        for (i = 0; i < RX_RING_SIZE; i++) {
 741                struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 742                yp->rx_skbuff[i] = skb;
 743                if (skb == NULL)
 744                        break;
 745                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
 746                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 747                        skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 748        }
 749        if (i != RX_RING_SIZE) {
 750                for (j = 0; j < i; j++)
 751                        dev_kfree_skb(yp->rx_skbuff[j]);
 752                return -ENOMEM;
 753        }
 754        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 755        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 756
 757#define NO_TXSTATS
 758#ifdef NO_TXSTATS
 759        /* In this mode the Tx ring needs only a single descriptor. */
 760        for (i = 0; i < TX_RING_SIZE; i++) {
 761                yp->tx_skbuff[i] = NULL;
 762                yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 763                yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 764                        ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 765        }
 766        /* Wrap ring */
 767        yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 768#else
 769{
 770        /* Tx ring needs a pair of descriptors, the second for the status. */
 771        for (i = 0; i < TX_RING_SIZE; i++) {
 772                j = 2*i;
  773                yp->tx_skbuff[i] = NULL;
 774                /* Branch on Tx error. */
 775                yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 776                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 777                        (j+1)*sizeof(struct yellowfin_desc));
 778                j++;
  779                if (yp->drv_flags & FullTxStatus) {
 780                        yp->tx_ring[j].dbdma_cmd =
 781                                cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 782                        yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 783                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 784                                i*sizeof(struct tx_status_words));
 785                } else {
 786                        /* Symbios chips write only tx_errs word. */
 787                        yp->tx_ring[j].dbdma_cmd =
 788                                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 789                        yp->tx_ring[j].request_cnt = 2;
 790                        /* Om pade ummmmm... */
 791                        yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 792                                i*sizeof(struct tx_status_words) +
 793                                &(yp->tx_status[0].tx_errs) -
 794                                &(yp->tx_status[0]));
 795                }
 796                yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 797                        ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 798        }
 799        /* Wrap ring */
 800        yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 801}
 802#endif
 803        yp->tx_tail_desc = &yp->tx_status[0];
 804        return 0;
 805}
 806
 807static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 808                                        struct net_device *dev)
 809{
 810        struct yellowfin_private *yp = netdev_priv(dev);
 811        unsigned entry;
 812        int len = skb->len;
 813
 814        netif_stop_queue (dev);
 815
 816        /* Note: Ordering is important here, set the field with the
 817           "ownership" bit last, and only then increment cur_tx. */
 818
 819        /* Calculate the next Tx descriptor entry. */
 820        entry = yp->cur_tx % TX_RING_SIZE;
 821
 822        if (gx_fix) {   /* Note: only works for paddable protocols e.g.  IP. */
 823                int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 824                /* Fix GX chipset errata. */
 825                if (cacheline_end > 24  || cacheline_end == 0) {
 826                        len = skb->len + 32 - cacheline_end + 1;
 827                        if (skb_padto(skb, len)) {
 828                                yp->tx_skbuff[entry] = NULL;
 829                                netif_wake_queue(dev);
 830                                return NETDEV_TX_OK;
 831                        }
 832                }
 833        }
 834        yp->tx_skbuff[entry] = skb;
 835
 836#ifdef NO_TXSTATS
 837        yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 838                skb->data, len, PCI_DMA_TODEVICE));
 839        yp->tx_ring[entry].result_status = 0;
 840        if (entry >= TX_RING_SIZE-1) {
 841                /* New stop command. */
 842                yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 843                yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 844                        cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 845        } else {
 846                yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 847                yp->tx_ring[entry].dbdma_cmd =
 848                        cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 849        }
 850        yp->cur_tx++;
 851#else
 852        yp->tx_ring[entry<<1].request_cnt = len;
 853        yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 854                skb->data, len, PCI_DMA_TODEVICE));
 855        /* The input_last (status-write) command is constant, but we must
 856           rewrite the subsequent 'stop' command. */
 857
 858        yp->cur_tx++;
 859        {
 860                unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 861                yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 862        }
 863        /* Final step -- overwrite the old 'stop' command. */
 864
 865        yp->tx_ring[entry<<1].dbdma_cmd =
 866                cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 867                                          CMD_TX_PKT | BRANCH_IFTRUE) | len);
 868#endif
 869
 870        /* Non-x86 Todo: explicitly flush cache lines here. */
 871
 872        /* Wake the potentially-idle transmit channel. */
 873        iowrite32(0x10001000, yp->base + TxCtrl);
 874
 875        if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 876                netif_start_queue (dev);                /* Typical path */
 877        else
 878                yp->tx_full = 1;
 879
 880        if (yellowfin_debug > 4) {
 881                netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 882                              yp->cur_tx, entry);
 883        }
 884        return NETDEV_TX_OK;
 885}
 886
 887/* The interrupt handler does all of the Rx thread work and cleans up
 888   after the Tx thread. */
 889static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 890{
 891        struct net_device *dev = dev_instance;
 892        struct yellowfin_private *yp;
 893        void __iomem *ioaddr;
 894        int boguscnt = max_interrupt_work;
 895        unsigned int handled = 0;
 896
 897        yp = netdev_priv(dev);
 898        ioaddr = yp->base;
 899
 900        spin_lock (&yp->lock);
 901
 902        do {
 903                u16 intr_status = ioread16(ioaddr + IntrClear);
 904
 905                if (yellowfin_debug > 4)
 906                        netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 907                                      intr_status);
 908
 909                if (intr_status == 0)
 910                        break;
 911                handled = 1;
 912
 913                if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 914                        yellowfin_rx(dev);
 915                        iowrite32(0x10001000, ioaddr + RxCtrl);         /* Wake Rx engine. */
 916                }
 917
 918#ifdef NO_TXSTATS
 919                for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 920                        int entry = yp->dirty_tx % TX_RING_SIZE;
 921                        struct sk_buff *skb;
 922
 923                        if (yp->tx_ring[entry].result_status == 0)
 924                                break;
 925                        skb = yp->tx_skbuff[entry];
 926                        dev->stats.tx_packets++;
 927                        dev->stats.tx_bytes += skb->len;
 928                        /* Free the original skb. */
 929                        pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
 930                                skb->len, PCI_DMA_TODEVICE);
 931                        dev_kfree_skb_irq(skb);
 932                        yp->tx_skbuff[entry] = NULL;
 933                }
 934                if (yp->tx_full &&
 935                    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 936                        /* The ring is no longer full, clear tbusy. */
 937                        yp->tx_full = 0;
 938                        netif_wake_queue(dev);
 939                }
 940#else
 941                if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 942                        unsigned dirty_tx = yp->dirty_tx;
 943
 944                        for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 945                                 dirty_tx++) {
 946                                /* Todo: optimize this. */
 947                                int entry = dirty_tx % TX_RING_SIZE;
 948                                u16 tx_errs = yp->tx_status[entry].tx_errs;
 949                                struct sk_buff *skb;
 950
 951#ifndef final_version
 952                                if (yellowfin_debug > 5)
 953                                        netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 954                                                      entry,
 955                                                      yp->tx_status[entry].tx_cnt,
 956                                                      yp->tx_status[entry].tx_errs,
 957                                                      yp->tx_status[entry].total_tx_cnt,
 958                                                      yp->tx_status[entry].paused);
 959#endif
 960                                if (tx_errs == 0)
 961                                        break;  /* It still hasn't been Txed */
 962                                skb = yp->tx_skbuff[entry];
 963                                if (tx_errs & 0xF810) {
  964                                        /* There was a major error, log it. */
 965#ifndef final_version
 966                                        if (yellowfin_debug > 1)
 967                                                netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 968                                                              tx_errs);
 969#endif
 970                                        dev->stats.tx_errors++;
 971                                        if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 972                                        if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 973                                        if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 974                                        if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 975                                } else {
 976#ifndef final_version
 977                                        if (yellowfin_debug > 4)
 978                                                netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 979                                                              tx_errs);
 980#endif
 981                                        dev->stats.tx_bytes += skb->len;
 982                                        dev->stats.collisions += tx_errs & 15;
 983                                        dev->stats.tx_packets++;
 984                                }
 985                                /* Free the original skb. */
 986                                pci_unmap_single(yp->pci_dev,
 987                                        yp->tx_ring[entry<<1].addr, skb->len,
 988                                        PCI_DMA_TODEVICE);
 989                                dev_kfree_skb_irq(skb);
  990                                yp->tx_skbuff[entry] = NULL;
 991                                /* Mark status as empty. */
 992                                yp->tx_status[entry].tx_errs = 0;
 993                        }
 994
 995#ifndef final_version
 996                        if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
 997                                netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
 998                                           dirty_tx, yp->cur_tx, yp->tx_full);
 999                                dirty_tx += TX_RING_SIZE;
1000                        }
1001#endif
1002
1003                        if (yp->tx_full &&
1004                            yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1005                                /* The ring is no longer full, clear tbusy. */
1006                                yp->tx_full = 0;
1007                                netif_wake_queue(dev);
1008                        }
1009
1010                        yp->dirty_tx = dirty_tx;
1011                        yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1012                }
1013#endif
1014
1015                /* Log errors and other uncommon events. */
1016                if (intr_status & 0x2ee)        /* Abnormal error summary. */
1017                        yellowfin_error(dev, intr_status);
1018
1019                if (--boguscnt < 0) {
1020                        netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1021                                    intr_status);
1022                        break;
1023                }
1024        } while (1);
1025
1026        if (yellowfin_debug > 3)
1027                netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1028                              ioread16(ioaddr + IntrStatus));
1029
1030        spin_unlock (&yp->lock);
1031        return IRQ_RETVAL(handled);
1032}
1033
1034/* This routine is logically part of the interrupt handler, but separated
1035   for clarity and better register allocation. */
1036static int yellowfin_rx(struct net_device *dev)
1037{
1038        struct yellowfin_private *yp = netdev_priv(dev);
1039        int entry = yp->cur_rx % RX_RING_SIZE;
1040        int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1041
1042        if (yellowfin_debug > 4) {
1043                printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1044                           entry, yp->rx_ring[entry].result_status);
1045                printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1046                           entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1047                           yp->rx_ring[entry].result_status);
1048        }
1049
1050        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1051        while (1) {
1052                struct yellowfin_desc *desc = &yp->rx_ring[entry];
1053                struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1054                s16 frame_status;
1055                u16 desc_status;
1056                int data_size, yf_size;
1057                u8 *buf_addr;
1058
 1059                if (!desc->result_status)
1060                        break;
1061                pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1062                        yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1063                desc_status = le32_to_cpu(desc->result_status) >> 16;
1064                buf_addr = rx_skb->data;
1065                data_size = (le32_to_cpu(desc->dbdma_cmd) -
1066                        le32_to_cpu(desc->result_status)) & 0xffff;
1067                frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1068                if (yellowfin_debug > 4)
1069                        printk(KERN_DEBUG "  %s() status was %04x\n",
1070                               __func__, frame_status);
1071                if (--boguscnt < 0)
1072                        break;
1073
1074                yf_size = sizeof(struct yellowfin_desc);
1075
 1076                if (!(desc_status & RX_EOP)) {
1077                        if (data_size != 0)
1078                                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1079                                            desc_status, data_size);
1080                        dev->stats.rx_length_errors++;
1081                } else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
 1082                        /* There was an error. */
1083                        if (yellowfin_debug > 3)
1084                                printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1085                                       __func__, frame_status);
1086                        dev->stats.rx_errors++;
1087                        if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1088                        if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1089                        if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1090                        if (frame_status < 0) dev->stats.rx_dropped++;
1091                } else if ( !(yp->drv_flags & IsGigabit)  &&
1092                                   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1093                        u8 status1 = buf_addr[data_size-2];
1094                        u8 status2 = buf_addr[data_size-1];
1095                        dev->stats.rx_errors++;
1096                        if (status1 & 0xC0) dev->stats.rx_length_errors++;
1097                        if (status2 & 0x03) dev->stats.rx_frame_errors++;
1098                        if (status2 & 0x04) dev->stats.rx_crc_errors++;
1099                        if (status2 & 0x80) dev->stats.rx_dropped++;
1100#ifdef YF_PROTOTYPE             /* Support for prototype hardware errata. */
 1101                } else if ((yp->drv_flags & HasMACAddrBug)  &&
1102                        !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1103                                                      entry * yf_size),
1104                                          dev->dev_addr) &&
1105                        !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1106                                                      entry * yf_size),
1107                                          "\377\377\377\377\377\377")) {
1108                        if (bogus_rx++ == 0)
1109                                netdev_warn(dev, "Bad frame to %pM\n",
1110                                            buf_addr);
1111#endif
1112                } else {
1113                        struct sk_buff *skb;
1114                        int pkt_len = data_size -
1115                                (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1116                        /* To verify: Yellowfin Length should omit the CRC! */
1117
1118#ifndef final_version
1119                        if (yellowfin_debug > 4)
1120                                printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1121                                       __func__, pkt_len, data_size, boguscnt);
1122#endif
1123                        /* Check if the packet is long enough to just pass up the skbuff
1124                           without copying to a properly sized skbuff. */
1125                        if (pkt_len > rx_copybreak) {
1126                                skb_put(skb = rx_skb, pkt_len);
1127                                pci_unmap_single(yp->pci_dev,
1128                                        le32_to_cpu(yp->rx_ring[entry].addr),
1129                                        yp->rx_buf_sz,
1130                                        PCI_DMA_FROMDEVICE);
1131                                yp->rx_skbuff[entry] = NULL;
1132                        } else {
1133                                skb = netdev_alloc_skb(dev, pkt_len + 2);
1134                                if (skb == NULL)
1135                                        break;
1136                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
1137                                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1138                                skb_put(skb, pkt_len);
1139                                pci_dma_sync_single_for_device(yp->pci_dev,
1140                                                                le32_to_cpu(desc->addr),
1141                                                                yp->rx_buf_sz,
1142                                                                PCI_DMA_FROMDEVICE);
1143                        }
1144                        skb->protocol = eth_type_trans(skb, dev);
1145                        netif_rx(skb);
1146                        dev->stats.rx_packets++;
1147                        dev->stats.rx_bytes += pkt_len;
1148                }
1149                entry = (++yp->cur_rx) % RX_RING_SIZE;
1150        }
1151
1152        /* Refill the Rx ring buffers. */
1153        for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1154                entry = yp->dirty_rx % RX_RING_SIZE;
1155                if (yp->rx_skbuff[entry] == NULL) {
1156                        struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1157                        if (skb == NULL)
1158                                break;                          /* Better luck next round. */
1159                        yp->rx_skbuff[entry] = skb;
1160                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
1161                        yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1162                                skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1163                }
1164                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1165                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
1166                if (entry != 0)
1167                        yp->rx_ring[entry - 1].dbdma_cmd =
1168                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1169                else
1170                        yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1171                                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1172                                                        | yp->rx_buf_sz);
1173        }
1174
1175        return 0;
1176}
1177
1178static void yellowfin_error(struct net_device *dev, int intr_status)
1179{
1180        netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1181        /* Hmmmmm, it's not clear what to do here. */
1182        if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1183                dev->stats.tx_errors++;
1184        if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1185                dev->stats.rx_errors++;
1186}
1187
1188static int yellowfin_close(struct net_device *dev)
1189{
1190        struct yellowfin_private *yp = netdev_priv(dev);
1191        void __iomem *ioaddr = yp->base;
1192        int i;
1193
1194        netif_stop_queue (dev);
1195
1196        if (yellowfin_debug > 1) {
1197                netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1198                              ioread16(ioaddr + TxStatus),
1199                              ioread16(ioaddr + RxStatus),
1200                              ioread16(ioaddr + IntrStatus));
1201                netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1202                              yp->cur_tx, yp->dirty_tx,
1203                              yp->cur_rx, yp->dirty_rx);
1204        }
1205
1206        /* Disable interrupts by clearing the interrupt mask. */
1207        iowrite16(0x0000, ioaddr + IntrEnb);
1208
1209        /* Stop the chip's Tx and Rx processes. */
1210        iowrite32(0x80000000, ioaddr + RxCtrl);
1211        iowrite32(0x80000000, ioaddr + TxCtrl);
1212
1213        del_timer(&yp->timer);
1214
1215#if defined(__i386__)
1216        if (yellowfin_debug > 2) {
1217                printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1218                                (unsigned long long)yp->tx_ring_dma);
1219                for (i = 0; i < TX_RING_SIZE*2; i++)
1220                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1221                                   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1222                                   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1223                                   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1224                printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1225                for (i = 0; i < TX_RING_SIZE; i++)
1226                        printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1227                                   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1228                                   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1229
1230                printk(KERN_DEBUG "  Rx ring %08llx:\n",
1231                                (unsigned long long)yp->rx_ring_dma);
1232                for (i = 0; i < RX_RING_SIZE; i++) {
1233                        printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1234                                   ioread32(ioaddr + RxPtr) == (u32)(yp->rx_ring_dma + i*sizeof(yp->rx_ring[0])) ? '>' : ' ',
1235                                   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1236                                   yp->rx_ring[i].result_status);
1237                        if (yellowfin_debug > 6 && yp->rx_skbuff[i]) {
1238                                if (get_unaligned(yp->rx_skbuff[i]->data) != 0x69) {
1239                                        int j;
1240
1241                                        printk(KERN_DEBUG);    /* open a KERN_DEBUG line for the pr_cont()s */
1242                                        for (j = 0; j < 0x50; j++)
1243                                                pr_cont(" %04x",
1244                                                        get_unaligned((u16 *)yp->rx_skbuff[i]->data + j));
1245                                        pr_cont("\n");
1246                                }
1247                        }
1248                }
1249        }
1250#endif /* __i386__ debugging only */
1251
1252        free_irq(yp->pci_dev->irq, dev);
1253
    /* Free the Rx queue's skbuffs along with their DMA mappings. */
    for (i = 0; i < RX_RING_SIZE; i++) {
            yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
            if (yp->rx_skbuff[i]) {
                    /* Undo the streaming mapping made when the ring was
                       filled, before poisoning the descriptor address. */
                    pci_unmap_single(yp->pci_dev,
                                     le32_to_cpu(yp->rx_ring[i].addr),
                                     yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                    dev_kfree_skb(yp->rx_skbuff[i]);
            }
            yp->rx_skbuff[i] = NULL;
            yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
    }
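    /* Any Tx skbs still queued were streaming-mapped at transmit time;
       only the skbs themselves are released here. */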
1263        for (i = 0; i < TX_RING_SIZE; i++) {
1264                if (yp->tx_skbuff[i])
1265                        dev_kfree_skb(yp->tx_skbuff[i]);
1266                yp->tx_skbuff[i] = NULL;
1267        }
1268
1269#ifdef YF_PROTOTYPE                     /* Support for prototype hardware errata. */
1270        if (yellowfin_debug > 0) {
1271                netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1272                              bogus_rx);
1273        }
1274#endif
1275
1276        return 0;
1277}
1278
1279/* Set or clear the multicast filter for this adaptor. */
1280
1281static void set_rx_mode(struct net_device *dev)
1282{
1283        struct yellowfin_private *yp = netdev_priv(dev);
1284        void __iomem *ioaddr = yp->base;
1285        u16 cfg_value = ioread16(ioaddr + Cnfg);
1286
1287        /* Stop the Rx process to change any value. */
1288        iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1289        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1290                iowrite16(0x000F, ioaddr + AddrMode);
1291        } else if ((netdev_mc_count(dev) > 64) ||
1292                   (dev->flags & IFF_ALLMULTI)) {
1293                /* Too many to filter well, or accept all multicasts. */
1294                iowrite16(0x000B, ioaddr + AddrMode);
1295        } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1296                struct netdev_hw_addr *ha;
1297                u16 hash_table[4];
1298                int i;
1299
1300                memset(hash_table, 0, sizeof(hash_table));
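                    /* The chip's multicast filter has 64 hash slots stored
                       as four 16-bit registers: bit>>4 picks the register
                       and bit&15 the slot within it. */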
1301                netdev_for_each_mc_addr(ha, dev) {
1302                        unsigned int bit;
1303
1304                        /* Due to a bug in the early chip versions, multiple filter
1305                           slots must be set for each address. */
1306                        if (yp->drv_flags & HasMulticastBug) {
1307                                bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1308                                hash_table[bit >> 4] |= 1 << (bit & 15);
1309                                bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1310                                hash_table[bit >> 4] |= 1 << (bit & 15);
1311                                bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1312                                hash_table[bit >> 4] |= 1 << (bit & 15);
1313                        }
1314                        bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1315                        hash_table[bit >> 4] |= 1 << (bit & 15);
1316                }
1317                /* Copy the hash table to the chip. */
1318                for (i = 0; i < 4; i++)
1319                        iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1320                iowrite16(0x0003, ioaddr + AddrMode);
1321        } else {                                        /* Normal, unicast/broadcast-only mode. */
1322                iowrite16(0x0001, ioaddr + AddrMode);
1323        }
1324        /* Restart the Rx process. */
1325        iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1326}
1327
1328static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1329{
1330        struct yellowfin_private *np = netdev_priv(dev);
1331
1332        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1333        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1334        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1335}
1336
1337static const struct ethtool_ops ethtool_ops = {
1338        .get_drvinfo = yellowfin_get_drvinfo,
1339};
1340
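    /* Only drvinfo is exported through ethtool; MII register access goes
       through the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls below. */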
1341static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1342{
1343        struct yellowfin_private *np = netdev_priv(dev);
1344        void __iomem *ioaddr = np->base;
1345        struct mii_ioctl_data *data = if_mii(rq);
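            /* if_mii() reinterprets the ifreq payload as the MII ioctl's
               phy_id/reg_num/val_in/val_out block. */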
1346
1347        switch(cmd) {
1348        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
1349                data->phy_id = np->phys[0] & 0x1f;
1350                /* Fall Through */
1351
1352        case SIOCGMIIREG:               /* Read MII PHY register. */
1353                data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1354                return 0;
1355
1356        case SIOCSMIIREG:               /* Write MII PHY register. */
1357                if (data->phy_id == np->phys[0]) {
1358                        u16 value = data->val_in;
1359                        switch (data->reg_num) {
1360                        case 0:
1361                                /* BMCR: 0x8000 = reset, 0x1000 = autoneg enable; either one unlocks the media setting. */
1362                                np->medialock = (value & 0x9000) ? 0 : 1;
1363                                if (np->medialock)
1364                                        np->full_duplex = (value & 0x0100) ? 1 : 0; /* BMCR full-duplex bit */
1365                                break;
1366                        case 4: np->advertising = value; break; /* MII_ADVERTISE */
1367                        }
1368                        /* Perhaps check_duplex(dev), depending on chip semantics. */
1369                }
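                    /* The write itself is issued no matter which PHY was
                       addressed; the bookkeeping above only tracks phys[0]. */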
1370                mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1371                return 0;
1372        default:
1373                return -EOPNOTSUPP;
1374        }
1375}
1376
1377
1378static void yellowfin_remove_one(struct pci_dev *pdev)
1379{
1380        struct net_device *dev = pci_get_drvdata(pdev);
1381        struct yellowfin_private *np;
1382
1383        BUG_ON(!dev);
1384        np = netdev_priv(dev);
1385
    /* Unregister first so the interface is fully shut down before its
       DMA rings and MMIO mapping disappear. */
    unregister_netdev(dev);

    pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
            np->tx_status_dma);
    pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
    pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);

    pci_iounmap(pdev, np->base);

    pci_release_regions(pdev);

    free_netdev(dev);
1397}
1398
1399
1400static struct pci_driver yellowfin_driver = {
1401        .name           = DRV_NAME,
1402        .id_table       = yellowfin_pci_tbl,
1403        .probe          = yellowfin_init_one,
1404        .remove         = yellowfin_remove_one,
1405};
1406
1407
1408static int __init yellowfin_init(void)
1409{
1410/* When a module, this is printed whether or not devices are found in probe. */
1411#ifdef MODULE
1412        printk("%s", version);
1413#endif
1414        return pci_register_driver(&yellowfin_driver);
1415}
1416
1417
1418static void __exit yellowfin_cleanup(void)
1419{
1420        pci_unregister_driver(&yellowfin_driver);
1421}
1422
1423
1424module_init(yellowfin_init);
1425module_exit(yellowfin_cleanup);
1426