linux/drivers/net/epic100.c
<<
>>
Prefs
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3        Written/copyright 1997-2001 by Donald Becker.
   4
   5        This software may be used and distributed according to the terms of
   6        the GNU General Public License (GPL), incorporated herein by reference.
   7        Drivers based on or derived from this code fall under the GPL and must
   8        retain the authorship, copyright and license notice.  This file is not
   9        a complete program and may only be used when the entire operating
  10        system is licensed under the GPL.
  11
  12        This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13        SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15        The author may be reached as becker@scyld.com, or C/O
  16        Scyld Computing Corporation
  17        410 Severn Ave., Suite 210
  18        Annapolis MD 21403
  19
  20        Information and updates available at
  21        http://www.scyld.com/network/epic100.html
  22        [this link no longer provides anything useful -jgarzik]
  23
  24        ---------------------------------------------------------------------
  25
  26*/
  27
#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
/* -1 means "not set"; probe only applies entries that are >= 0. */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    256
#define TX_QUEUE_LEN    240             /* Limit ring entries actually used.  */
#define RX_RING_SIZE    256
#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct epic_tx_desc)
#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct epic_rx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1                /* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <asm/uaccess.h>
  90#include <asm/byteorder.h>
  91
  92/* These identify the driver base version and may not be removed. */
/* These identify the driver base version and may not be removed.
   Printed once at probe time (or module load) in epic_init_one(). */
static char version[] __devinitdata =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
static char version2[] __devinitdata =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

/* NOTE(review): debug description says "(0-5)" while the variable's comment
   above says 0..7 verbose — confirm the intended range. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112                                Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
/* Per-chip capability bits, kept in epic_private.chip_flags. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

/* Index into pci_id_tbl[]; carried as driver_data in epic_pci_tbl[]. */
typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;                          /* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};
 168
 169
/* PCI IDs this driver binds to (vendor 0x10B8 = SMC); driver_data is chip_t. */
static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);


/* When USE_IO_OPS is NOT defined, redirect the port-I/O accessors to their
   MMIO equivalents.  USE_IO_OPS is defined above, so this whole remap is
   compiled out and the driver uses port I/O on BAR 0. */
#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
 194
 195/* Offsets to registers, using the (ugh) SMC names. */
/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,     /* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,                                              /* MAC address. */
  MC0=80,                                               /* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
/* Bits written to the COMMAND register. */
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Interrupt sources serviced from NAPI poll vs. directly in the handler. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* MII BMCR values indexed by media type (dev->if_port & 15); 0 = autoselect. */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
 229
 230/*
 231 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 232 * really ARE host-endian; it's not a misannotation.  We tell
 233 * the card to byteswap them internally on big-endian hosts -
 234 * look for #ifdef __BIG_ENDIAN in epic_open().
 235 */
 236
/* Tx descriptor; host-endian (see comment above) — the chip is told to
   byteswap internally on big-endian hosts in epic_open(). */
struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

/* Rx descriptor; same host-endian convention as epic_tx_desc. */
struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Per-device private state, reached via netdev_priv(dev). */
struct epic_private {
	struct epic_rx_desc *rx_ring;			/* DMA-coherent Rx descriptor ring. */
	struct epic_tx_desc *tx_ring;			/* DMA-coherent Tx descriptor ring. */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;				/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;				/* Bus address of rx_ring. */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;					/* Base INTMASK value, set at probe. */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;			/* chip_t index and capability flags. */

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;				/* Current Tx FIFO threshold (TxThresh). */
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;				/* Number of PHYs found at probe. */
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
 291
/* Forward declarations for the driver's entry points. */
static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

/* net_device method table; installed on the device in epic_init_one(). */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
 323
/* PCI probe: enable the device, map its registers, allocate the netdev and
 * DMA descriptor rings, read the MAC address, scan for MII PHYs, then
 * register the net device.  On any failure the goto chain unwinds exactly
 * the resources acquired so far.  The register write sequences below are
 * order-sensitive (documented in SMSC app note 7.15) — do not reorder. */
static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* counts probed cards across calls, indexes options[]/full_duplex[] */
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s%s", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	/* BAR 0 (I/O space) must cover the full register window. */
	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev) {
		dev_err(&pdev->dev, "no memory for eth device\n");
		goto err_out_free_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);
#else
	/* NOTE(review): the first assignment below is immediately overwritten
	   by the ioremap result; it is dead code in this (compiled-out) branch. */
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) pci_ioremap_bar(pdev, 1);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	/* Hook up the generic MII helper library. */
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* DMA-coherent descriptor rings, freed in the error path / remove. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: dev->mem_start wins, else module params. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Read the station address from the LAN0 registers.
	   Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000 / 0xffff mean no PHY responded at this address. */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
	       dev->dev_addr);

out:
	return ret;

/* Error unwind: each label releases what was acquired before the failure. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
 528
 529/* Serial EEPROM section. */
 530
 531/*  EEPROM_Ctrl bits. */
/* Bit positions in the EECTL register used to bit-bang the serial EEPROM. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	inl(ee_addr)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
 551
 552static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 553{
 554        long ioaddr = dev->base_addr;
 555
 556        outl(0x00000000, ioaddr + INTMASK);
 557}
 558
/* Flush a posted write to the chip by reading INTMASK back.  Only needed
 * (and only compiled in) when memory-mapped I/O is used; with USE_IO_OPS
 * (port I/O) writes are not posted, so this is a no-op. */
static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}
 565
 566static inline void epic_napi_irq_off(struct net_device *dev,
 567                                     struct epic_private *ep)
 568{
 569        long ioaddr = dev->base_addr;
 570
 571        outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
 572        __epic_pci_commit(ioaddr);
 573}
 574
 575static inline void epic_napi_irq_on(struct net_device *dev,
 576                                    struct epic_private *ep)
 577{
 578        long ioaddr = dev->base_addr;
 579
 580        /* No need to commit possible posted write */
 581        outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
 582}
 583
/* Bit-bang a 16-bit word out of the serial EEPROM at @location via EECTL.
 * Bit 0x40 of EECTL selects the command width (64- vs 256-word parts).
 * The outl/eeprom_delay() pairs form the clock edges — order is critical. */
static int __devinit read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip select to start a new transaction. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	/* Clock in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
 617
/* MIICtrl operation bits. */
#define MII_READOP		1
#define MII_WRITEOP		2
/* Read an MII register: issue the command via MIICtrl, then poll until the
 * READOP bit clears and the result is valid in MIIData.  Returns 0xffff on
 * timeout (same value an absent PHY yields). */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: a spurious 0xffff from
			 * PHY 1, registers 0-5 is retried by reissuing the command. */
			if (phy_id == 1 && location < 6 &&
			    inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;
}
 642
 643static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 644{
 645        long ioaddr = dev->base_addr;
 646        int i;
 647
 648        outw(value, ioaddr + MIIData);
 649        outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
 650        for (i = 10000; i > 0; i--) {
 651                barrier();
 652                if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
 653                        break;
 654        }
 655}
 656
 657
/* ndo_open: soft-reset the chip, grab the (shared) IRQ, set up the rings,
 * program MAC address / duplex / thresholds, start Rx and unmask interrupts.
 * The GENCTL/TEST1/NVCTL write sequence mirrors epic_init_one() and is
 * order-critical (SMSC app note 7.15). */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	/* NAPI must be enabled before the IRQ can fire. */
	napi_enable(&ep->napi);
	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
		napi_disable(&ep->napi);
		return retval;
	}

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Program the station address into LAN0. */
	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	/* Forced media type: program the PHY BMCR from media2miictl[].
	   Otherwise read the link partner ability and pick duplex. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				/* No link partner acknowledge: restart autonegotiation. */
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Point the chip at the start of both descriptor rings. */
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}
 766
 767/* Reset the chip to recover from a PCI transaction error.
 768   This may occur at interrupt time. */
/* Quiesce the chip: stop the Tx queue, mask interrupts, halt both DMA
 * engines, harvest the hardware error counters (which clear on read on
 * this chip family — TODO confirm against the datasheet), and drain any
 * completed Rx packets.  Called from paths that will restart the chip. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts.  0xffff means the chip is gone (cf.
	   EpicRemoved), so skip the counter reads in that case. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
 790
/* Reset the chip to recover from a PCI transaction error and reprogram it
 * to resume at the current ring positions (cur_rx / dirty_tx).  May run at
 * interrupt time.  Same order-critical init sequence as epic_open(). */
static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Descriptor byteswap mode must match what epic_open() selected. */
#ifdef __BIG_ENDIAN
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Reprogram the station address. */
	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Resume the rings where processing left off, not at index 0. */
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
}
 842
/* Poll the MII link partner ability and switch the transmitter between
   full- and half-duplex when autonegotiation resolves differently from
   the current setting.  Called periodically from epic_timer(). */
static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	/* Link-partner ability of the first PHY, or 0 if none was found. */
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	/* Full duplex if 100baseTx-FD negotiated, or 10T-FD is the best
	   common mode. */
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			   " partner capability of %4.4x.\n", dev->name,
			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}
 863
/* Media-monitor timer: optionally dumps chip status when debugging,
   re-checks link duplex, then re-arms itself every 5 seconds.  Armed
   initially from the open path and stopped with del_timer_sync() in
   epic_close(). */
static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
			   dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
			   "IntStatus %4.4x RxStatus %4.4x.\n",
			   dev->name, (int)inl(ioaddr + INTMASK),
			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	/* Re-arm for the next media check. */
	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}
 885
/* Invoked by the networking core when a transmit hangs.  A Tx FIFO
   underflow (TxSTAT bit 0x10) only needs the transmitter restarted;
   anything else gets a full chip restart followed by a Tx queue kick. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
 913
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
   Builds both descriptor rings (linked in a circle via the DMA 'next'
   pointers), allocates and maps the Rx buffers, and resets the ring
   indices. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	/* Standard-MTU frames fit in PKT_BUF_SZ; otherwise size to MTU
	   plus slack for headers/trailers. */
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	/* If some allocations failed (i < RX_RING_SIZE), the unsigned
	   wraparound makes cur_rx - dirty_rx positive so the refill loop
	   in epic_rx() will retry the missing buffers. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
 959
/* Queue one packet for transmission.  Pads short frames to the
   Ethernet minimum, fills the next Tx descriptor and kicks the chip.
   A Tx-done interrupt is requested only on some descriptors
   (ctrl_word 0x140000 vs 0x100000) to limit interrupt load. */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* skb_padto() frees the skb itself on failure. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Length in the high half, DescOwn last so the chip only sees a
	   fully-formed descriptor. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return NETDEV_TX_OK;
}
1013
1014static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1015                          int status)
1016{
1017        struct net_device_stats *stats = &dev->stats;
1018
1019#ifndef final_version
1020        /* There was an major error, log it. */
1021        if (debug > 1)
1022                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1023                       dev->name, status);
1024#endif
1025        stats->tx_errors++;
1026        if (status & 0x1050)
1027                stats->tx_aborted_errors++;
1028        if (status & 0x0008)
1029                stats->tx_carrier_errors++;
1030        if (status & 0x0040)
1031                stats->tx_window_errors++;
1032        if (status & 0x0010)
1033                stats->tx_fifo_errors++;
1034}
1035
/* Reclaim completed Tx descriptors: account stats, unmap the DMA
   buffer and free the skb for every descriptor the chip has handed
   back.  Runs from epic_poll(); wakes the queue once the ring drains
   below its high-water mark. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Low status bit set: transmitted successfully. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1083
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Rx/Tx work itself is deferred to NAPI
   (epic_poll); only the uncommon error events are handled inline. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n", dev->name, status,
				   (int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			/* Poller already running: note that more events
			   arrived so epic_poll() will loop again. */
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		/* All-ones status: the card has been removed. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			dev->stats.tx_fifo_errors++;
			/* Raise the Tx start threshold to avoid repeats. */
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1154
/* Receive up to 'budget' packets and refill the Rx ring.  Frames
   shorter than rx_copybreak are copied into a freshly allocated skb so
   the full-size ring buffer can be reused in place; larger frames are
   passed up directly and their slot is re-allocated by the refill loop.
   Returns the number of work units done (packets plus refills). */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			/* Receive error: either a frame spanning buffers
			   (0x2000) or a hardware-counted frame error. */
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				/* Clamp to the maximum legal frame length. */
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Sync so the CPU sees the DMA'd data, copy,
				   then give the buffer back to the device. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring buffer itself to the stack;
				   the refill loop will allocate a new one. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1249
/* Handle Rx error conditions reported in INTSTAT: count a missed
   frame on overflow and restart Rx queueing when the ring overflowed
   or filled up.  Called from epic_poll(). */
static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;
	int status;

	status = inl(ioaddr + INTSTAT);

	/* All-ones status: the card has been removed. */
	if (status == EpicRemoved)
		return;
	if (status & RxOverflow)	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		outw(RxQueued, ioaddr + COMMAND);
}
1264
/* NAPI poll handler: reap completed transmits, receive up to 'budget'
   packets, then service Rx error states.  If the interrupt handler
   noted further events while we ran (reschedule_in_poll), loop again
   instead of completing; otherwise acknowledge the NAPI event sources
   and re-enable their interrupts. */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	long ioaddr = dev->base_addr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__napi_complete(napi);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1304
/* Down the interface: stop the queue and NAPI, kill the media timer,
   quiesce the chip, release the IRQ and all ring buffers, and leave
   the chip in low-power mode.  Always returns 0. */
static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Free any skbs still waiting in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}
1355
/* Return the device statistics, first folding in the hardware error
   counters when the interface is running.
   NOTE(review): the += accumulation suggests these counter registers
   clear on read -- confirm with the 83c170 databook. */
static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	if (netif_running(dev)) {
		/* Update the error counts. */
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	return &dev->stats;
}
1369
1370/* Set or clear the multicast filter for this adaptor.
1371   Note that we only use exclusion around actually queueing the
1372   new frame, not around filling ep->setup_frame.  This is non-deterministic
1373   when re-entered but still correct. */
1374
1375static void set_rx_mode(struct net_device *dev)
1376{
1377        long ioaddr = dev->base_addr;
1378        struct epic_private *ep = netdev_priv(dev);
1379        unsigned char mc_filter[8];              /* Multicast hash filter */
1380        int i;
1381
1382        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1383                outl(0x002C, ioaddr + RxCtrl);
1384                /* Unconditionally log net taps. */
1385                memset(mc_filter, 0xff, sizeof(mc_filter));
1386        } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1387                /* There is apparently a chip bug, so the multicast filter
1388                   is never enabled. */
1389                /* Too many to filter perfectly -- accept all multicasts. */
1390                memset(mc_filter, 0xff, sizeof(mc_filter));
1391                outl(0x000C, ioaddr + RxCtrl);
1392        } else if (netdev_mc_empty(dev)) {
1393                outl(0x0004, ioaddr + RxCtrl);
1394                return;
1395        } else {                                        /* Never executed, for now. */
1396                struct netdev_hw_addr *ha;
1397
1398                memset(mc_filter, 0, sizeof(mc_filter));
1399                netdev_for_each_mc_addr(ha, dev) {
1400                        unsigned int bit_nr =
1401                                ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1402                        mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1403                }
1404        }
1405        /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1406        if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1407                for (i = 0; i < 4; i++)
1408                        outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1409                memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1410        }
1411}
1412
1413static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1414{
1415        struct epic_private *np = netdev_priv(dev);
1416
1417        strcpy (info->driver, DRV_NAME);
1418        strcpy (info->version, DRV_VERSION);
1419        strcpy (info->bus_info, pci_name(np->pci_dev));
1420}
1421
1422static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1423{
1424        struct epic_private *np = netdev_priv(dev);
1425        int rc;
1426
1427        spin_lock_irq(&np->lock);
1428        rc = mii_ethtool_gset(&np->mii, cmd);
1429        spin_unlock_irq(&np->lock);
1430
1431        return rc;
1432}
1433
1434static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1435{
1436        struct epic_private *np = netdev_priv(dev);
1437        int rc;
1438
1439        spin_lock_irq(&np->lock);
1440        rc = mii_ethtool_sset(&np->mii, cmd);
1441        spin_unlock_irq(&np->lock);
1442
1443        return rc;
1444}
1445
1446static int netdev_nway_reset(struct net_device *dev)
1447{
1448        struct epic_private *np = netdev_priv(dev);
1449        return mii_nway_restart(&np->mii);
1450}
1451
1452static u32 netdev_get_link(struct net_device *dev)
1453{
1454        struct epic_private *np = netdev_priv(dev);
1455        return mii_link_ok(&np->mii);
1456}
1457
1458static u32 netdev_get_msglevel(struct net_device *dev)
1459{
1460        return debug;
1461}
1462
1463static void netdev_set_msglevel(struct net_device *dev, u32 value)
1464{
1465        debug = value;
1466}
1467
/* ethtool pre-hook: power the chip up while the interface is down so
   MII accesses made by the other ethtool ops work.  Mirrors the
   power-up sequence used in netdev_ioctl(). */
static int ethtool_begin(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}
	return 0;
}
1478
/* ethtool post-hook: undo ethtool_begin() -- return the chip to
   low-power mode if the interface is down. */
static void ethtool_complete(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
}
1488
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1500
/* MII ioctl handler (the SIOC[GS]MIIxxx ioctls).  Temporarily powers
   the chip up when the interface is down so the MII registers are
   reachable, and powers it back down afterwards. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}
1526
1527
/* PCI removal callback: tear down one adapter instance and release
   all of its resources.
   NOTE(review): the DMA rings are freed before unregister_netdev();
   this relies on the interface already being closed at remove time --
   verify against the hot-unplug path. */
static void __devexit epic_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
	unregister_netdev(dev);
#ifndef USE_IO_OPS
	iounmap((void*) dev->base_addr);
#endif
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* pci_power_off(pdev, -1); */
}
1545
1546
1547#ifdef CONFIG_PM
1548
/* PM suspend: quiesce the chip and drop it into low-power mode.
   Nothing to do if the interface is already down. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}
1562
1563
/* PM resume: re-initialize the chip, but only when the interface was
   up at suspend time. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1574
1575#endif /* CONFIG_PM */
1576
1577
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1588
1589
/* Module init: print the version banner (module builds only) and
   register the PCI driver. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s%s",
		version, version2);
#endif

	return pci_register_driver(&epic_driver);
}
1600
1601
/* Module exit: unregister the PCI driver, detaching all devices. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);
1610