/* linux/drivers/net/ethernet/smsc/epic100.c */
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3        Written/copyright 1997-2001 by Donald Becker.
   4
   5        This software may be used and distributed according to the terms of
   6        the GNU General Public License (GPL), incorporated herein by reference.
   7        Drivers based on or derived from this code fall under the GPL and must
   8        retain the authorship, copyright and license notice.  This file is not
   9        a complete program and may only be used when the entire operating
  10        system is licensed under the GPL.
  11
  12        This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13        SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15        The author may be reached as becker@scyld.com, or C/O
  16        Scyld Computing Corporation
  17        410 Severn Ave., Suite 210
  18        Annapolis MD 21403
  19
  20        Information and updates available at
  21        http://www.scyld.com/network/epic100.html
  22        [this link no longer provides anything useful -jgarzik]
  23
  24        ---------------------------------------------------------------------
  25
  26*/
  27
  28#define DRV_NAME        "epic100"
  29#define DRV_VERSION     "2.1"
  30#define DRV_RELDATE     "Sept 11, 2006"
  31
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
/* Per-board overrides, indexed by probe order; -1 means "use default". */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
  45
  46/* Operational parameters that are set at compile time. */
  47
  48/* Keep the ring sizes a power of two for operational efficiency.
  49   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  50   Making the Tx ring too large decreases the effectiveness of channel
  51   bonding and packet priority.
  52   There are no ill effects from too-large receive rings. */
  53#define TX_RING_SIZE    256
  54#define TX_QUEUE_LEN    240             /* Limit ring entries actually used.  */
  55#define RX_RING_SIZE    256
  56#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct epic_tx_desc)
  57#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct epic_rx_desc)
  58
  59/* Operational parameters that usually are not changed. */
  60/* Time in jiffies before concluding the transmitter is hung. */
  61#define TX_TIMEOUT  (2*HZ)
  62
  63#define PKT_BUF_SZ              1536                    /* Size of each temporary Rx buffer.*/
  64
  65/* Bytes transferred to chip before transmission starts. */
  66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
  67#define TX_FIFO_THRESH 256
  68#define RX_FIFO_THRESH 1                /* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <asm/uaccess.h>
  90#include <asm/byteorder.h>
  91
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; meanings are given by the PARM_DESC strings below. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112                                Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
/* Capability flags kept in epic_chip_info.drv_flags for each chip type. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

/* Size of the chip's register window (checked against the PCI BAR). */
#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

/* BAR 0 is the port-I/O region, BAR 1 the memory-mapped one. */
#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

/* Index into pci_id_tbl[]; carried in epic_pci_tbl[].driver_data. */
typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;
 160
 161
/* Static, per-chip-type information. */
struct epic_chip_info {
	const char *name;
	int drv_flags;                          /* Driver use, intended as capability flags. */
};

/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};

/* PCI IDs we bind to; driver_data is the chip_t index into pci_id_tbl[].
   The first entry matches one specific 83c170 subsystem variant. */
static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);

/* Register accessors; a local 'ioaddr' must be in scope at the call site. */
#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))
 190
/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,					/* MAC address. */
  MC0=80,					/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
/* Bits written to the COMMAND register. */
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Event bits serviced in the NAPI poll loop; everything else is handled
   directly in the interrupt handler. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* Map the media type in the low 4 bits of dev->if_port to a forced MII
   BMCR value; 0 means leave the transceiver autonegotiating. */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
 225
/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;		/* Ownership/status; see desc_status_bits. */
	u32 bufaddr;		/* Bus address of the packet data. */
	u32 buflength;
	u32 next;		/* Bus address of the next descriptor. */
};

struct epic_rx_desc {
	u32 rxstatus;		/* Ownership/status; see desc_status_bits. */
	u32 bufaddr;		/* Bus address of the receive buffer. */
	u32 buflength;
	u32 next;		/* Bus address of the next descriptor. */
};

enum desc_status_bits {
	DescOwn=0x8000,		/* Descriptor is owned by the chip. */
};
 250
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Per-device driver state, stored in netdev_priv(dev). */
struct epic_private {
	struct epic_rx_desc *rx_ring;		/* Coherent DMA descriptor rings. */
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;			/* Bus addresses of the rings. */
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;		/* Tx producer/consumer indices. */

	unsigned int cur_rx, dirty_rx;		/* Rx producer/consumer indices. */
	u32 irq_mask;				/* Interrupt sources we unmask. */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;			/* Mapped chip register window. */
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;		/* chip_t index and drv_flags copy. */

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;			/* Current Tx FIFO start threshold. */
	unsigned char mc_filter[8];		/* Multicast hash filter shadow. */
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;			/* Number of PHYs found at probe. */
	struct mii_if_info mii;
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
 288
/* Forward declarations for the net_device methods and helpers below. */
static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

/* net_device method table; generic eth_* helpers cover MTU/MAC handling. */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
 320
/*
 * PCI probe: enable the device, map the register BAR, allocate coherent
 * Tx/Rx descriptor rings, read the station address from the chip, scan
 * for MII transceivers and register the net_device.
 * Returns 0 on success or a negative errno; all resources acquired up to
 * the failure point are released via the goto unwind chain at the end.
 */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* probe order; indexes options[]/full_duplex[] */
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* Allocate the coherent descriptor rings. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: dev->mem_start wins, else module params. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000 and 0xffff mean "no PHY at this address". */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

	/* Error unwind: release resources in reverse order of acquisition. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
 513
 514/* Serial EEPROM section. */
 515
 516/*  EEPROM_Ctrl bits. */
 517#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
 518#define EE_CS                   0x02    /* EEPROM chip select. */
 519#define EE_DATA_WRITE   0x08    /* EEPROM chip data in. */
 520#define EE_WRITE_0              0x01
 521#define EE_WRITE_1              0x09
 522#define EE_DATA_READ    0x10    /* EEPROM chip data out. */
 523#define EE_ENB                  (0x0001 | EE_CS)
 524
 525/* Delay between EEPROM clock transitions.
 526   This serves to flush the operation to the PCI bus.
 527 */
 528
 529#define eeprom_delay()  er32(EECTL)
 530
 531/* The EEPROM commands include the alway-set leading bit. */
 532#define EE_WRITE_CMD    (5 << 6)
 533#define EE_READ64_CMD   (6 << 6)
 534#define EE_READ256_CMD  (6 << 8)
 535#define EE_ERASE_CMD    (7 << 6)
 536
/* Mask all chip interrupt sources.  @dev is unused; it is kept so the
   signature matches the other interrupt helpers. */
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, 0x00000000);
}
 543
/* Flush a posted MMIO write by reading a register back.  With port I/O
   (USE_IO_OPS defined) writes are not posted, so this compiles to nothing. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
 550
/* Mask the NAPI-handled event bits, leaving the other sources enabled;
   the readback guarantees the mask takes effect before we return. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}
 559
/* Re-enable the NAPI-handled event bits in the interrupt mask. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
 568
 569static int read_eeprom(struct epic_private *ep, int location)
 570{
 571        void __iomem *ioaddr = ep->ioaddr;
 572        int i;
 573        int retval = 0;
 574        int read_cmd = location |
 575                (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
 576
 577        ew32(EECTL, EE_ENB & ~EE_CS);
 578        ew32(EECTL, EE_ENB);
 579
 580        /* Shift the read command bits out. */
 581        for (i = 12; i >= 0; i--) {
 582                short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
 583                ew32(EECTL, EE_ENB | dataval);
 584                eeprom_delay();
 585                ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
 586                eeprom_delay();
 587        }
 588        ew32(EECTL, EE_ENB);
 589
 590        for (i = 16; i > 0; i--) {
 591                ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
 592                eeprom_delay();
 593                retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
 594                ew32(EECTL, EE_ENB);
 595                eeprom_delay();
 596        }
 597
 598        /* Terminate the EEPROM access. */
 599        ew32(EECTL, EE_ENB & ~EE_CS);
 600        return retval;
 601}
 602
 603#define MII_READOP              1
 604#define MII_WRITEOP             2
 605static int mdio_read(struct net_device *dev, int phy_id, int location)
 606{
 607        struct epic_private *ep = netdev_priv(dev);
 608        void __iomem *ioaddr = ep->ioaddr;
 609        int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
 610        int i;
 611
 612        ew32(MIICtrl, read_cmd);
 613        /* Typical operation takes 25 loops. */
 614        for (i = 400; i > 0; i--) {
 615                barrier();
 616                if ((er32(MIICtrl) & MII_READOP) == 0) {
 617                        /* Work around read failure bug. */
 618                        if (phy_id == 1 && location < 6 &&
 619                            er16(MIIData) == 0xffff) {
 620                                ew32(MIICtrl, read_cmd);
 621                                continue;
 622                        }
 623                        return er16(MIIData);
 624                }
 625        }
 626        return 0xffff;
 627}
 628
 629static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 630{
 631        struct epic_private *ep = netdev_priv(dev);
 632        void __iomem *ioaddr = ep->ioaddr;
 633        int i;
 634
 635        ew16(MIIData, value);
 636        ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
 637        for (i = 10000; i > 0; i--) {
 638                barrier();
 639                if ((er32(MIICtrl) & MII_WRITEOP) == 0)
 640                        break;
 641        }
 642}
 643
 644
/*
 * ndo_open: bring the interface up.  Resets the chip, hooks the shared
 * interrupt, builds the descriptor rings, programs station address,
 * duplex and thresholds, starts the receiver, unmasks interrupts and
 * arms the media-check timer.  Returns 0 or the request_irq() errno.
 */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Program the station address into the LAN0 registers. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* Forced media type requested: write the matching BMCR value;
	   otherwise inspect the link partner and set duplex from it. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return rc;
}
 753
 754/* Reset the chip to recover from a PCI transaction error.
 755   This may occur at interrupt time. */
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
/* Quiesce the chip: stop the Tx queue, mask interrupts, halt both DMA
   engines, fold the hardware error counters into dev->stats and drain
   whatever is left on the Rx ring. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	if (er16(COMMAND) != 0xffff) {	/* all-ones => chip gone (see EpicRemoved) */
		stats->rx_missed_errors += er8(MPCNT);
		stats->rx_frame_errors  += er8(ALICNT);
		stats->rx_crc_errors    += er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
 779
/* Soft-reset and fully reprogram the chip, then resume Rx at the current
   ring positions.  Counterpart to epic_pause(); used for error recovery. */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Tell the chip to byteswap descriptors on big-endian hosts. */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Re-program the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Point the chip at the descriptors it should process next, not at
	   the start of the rings. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
 829
/* Poll the PHY's link-partner ability register and, when autonegotiation
   resolved to a different duplex than currently programmed, reconfigure
   the chip's Tx control register to match. */
static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	/* Standard MII resolution: full duplex if 100Base-TX FD (0x0100) was
	   agreed, or if the only common 10/100 mode is 10Base-T FD (0x0040). */
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
			    ep->mii.full_duplex ? "full" : "half",
			    ep->phys[0], mii_lpa);
		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
	}
}
 850
/* Periodic media-monitoring timer (every 5 seconds); re-arms itself. */
static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
			   er32(TxSTAT));
		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
	}

	check_media(dev);

	/* Re-arm for the next media check. */
	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}
 870
/* Transmit watchdog, called when the stack has seen no Tx progress for
   too long.  A plain Tx FIFO underflow only needs the transmitter
   restarted; anything else gets a full chip re-initialization. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		/* Re-kick the queue in case descriptors were pending. */
		ew32(COMMAND, TxQueued);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
 897
 898/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	/* Rx buffers must fit a full MTU-sized frame plus slack. */
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		/* Hand the descriptor to the chip only after mapping. */
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	/* Nonzero (negative as signed) iff some Rx buffers are still
	   missing; the refill loop in epic_rx() will retry allocation. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	/* Close the Tx descriptor chain into a ring as well. */
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
 943
/* Queue one skb for transmission: pad runts, fill the next Tx
   descriptor, then kick the chip.  As interrupt mitigation, only
   roughly every half-queue packet asks for a Tx-done interrupt. */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	/* NOTE: despite the name, this is the number of in-flight entries. */
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Writing DescOwn hands the descriptor to the chip -- last write. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
 996
 997static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
 998                          int status)
 999{
1000        struct net_device_stats *stats = &dev->stats;
1001
1002#ifndef final_version
1003        /* There was an major error, log it. */
1004        if (debug > 1)
1005                netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
1006                           status);
1007#endif
1008        stats->tx_errors++;
1009        if (status & 0x1050)
1010                stats->tx_aborted_errors++;
1011        if (status & 0x0008)
1012                stats->tx_carrier_errors++;
1013        if (status & 0x0040)
1014                stats->tx_window_errors++;
1015        if (status & 0x0010)
1016                stats->tx_fifo_errors++;
1017}
1018
/* Reclaim completed Tx descriptors: account statistics, unmap the DMA
   buffers, free the skbs and possibly re-wake the transmit queue. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Bit 0 set means the frame went out successfully. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: dirty index should never lag by more than a ring. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1065
1066/* The interrupt handler does all of the Rx thread work and cleans up
1067   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Rx/Tx work is deferred to NAPI.  If the poll routine is already
	   active, just bump reschedule_in_poll so epic_poll() loops once
	   more instead of completing. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* The card has presumably been removed; nothing to do. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors += er8(MPCNT);
		stats->rx_frame_errors  += er8(ALICNT);
		stats->rx_crc_errors    += er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the Tx threshold to make underruns rarer. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
1137
/* Receive up to 'budget' packets from the Rx ring, then refill the ring
   with fresh buffers.  Small frames are copied into a minimally-sized
   skb (copybreak); larger ones hand the ring buffer itself up the stack.
   Returns the amount of work done (packets passed up plus refills). */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never examine more descriptors than are currently populated. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				/* Clamp to a standard maximum frame size. */
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring buffer itself up the stack;
				   the slot is refilled below. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1232
1233static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1234{
1235        void __iomem *ioaddr = ep->ioaddr;
1236        int status;
1237
1238        status = er32(INTSTAT);
1239
1240        if (status == EpicRemoved)
1241                return;
1242        if (status & RxOverflow)        /* Missed a Rx frame. */
1243                dev->stats.rx_errors++;
1244        if (status & (RxOverflow | RxFull))
1245                ew16(COMMAND, RxQueued);
1246}
1247
/* NAPI poll routine: reap finished Tx, receive up to 'budget' packets
   and handle Rx error conditions.  If an interrupt fired while we were
   polling (reschedule_in_poll set by epic_interrupt()), loop again
   rather than completing, so no event is lost. */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	void __iomem *ioaddr = ep->ioaddr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			/* No pending events: complete and unmask the
			   NAPI interrupt sources again. */
			__napi_complete(napi);
			ew32(INTSTAT, EpicNapiEvent);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1287
/* Bring the interface down: stop the queue and NAPI, kill the media
   timer, quiesce the chip, release the IRQ, free all ring buffers and
   leave the chip in low-power mode. */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Free any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1339
1340static struct net_device_stats *epic_get_stats(struct net_device *dev)
1341{
1342        struct epic_private *ep = netdev_priv(dev);
1343        void __iomem *ioaddr = ep->ioaddr;
1344
1345        if (netif_running(dev)) {
1346                struct net_device_stats *stats = &dev->stats;
1347
1348                stats->rx_missed_errors += er8(MPCNT);
1349                stats->rx_frame_errors  += er8(ALICNT);
1350                stats->rx_crc_errors    += er8(CRCCNT);
1351        }
1352
1353        return &dev->stats;
1354}
1355
1356/* Set or clear the multicast filter for this adaptor.
1357   Note that we only use exclusion around actually queueing the
1358   new frame, not around filling ep->setup_frame.  This is non-deterministic
1359   when re-entered but still correct. */
1360
1361static void set_rx_mode(struct net_device *dev)
1362{
1363        struct epic_private *ep = netdev_priv(dev);
1364        void __iomem *ioaddr = ep->ioaddr;
1365        unsigned char mc_filter[8];              /* Multicast hash filter */
1366        int i;
1367
1368        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1369                ew32(RxCtrl, 0x002c);
1370                /* Unconditionally log net taps. */
1371                memset(mc_filter, 0xff, sizeof(mc_filter));
1372        } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1373                /* There is apparently a chip bug, so the multicast filter
1374                   is never enabled. */
1375                /* Too many to filter perfectly -- accept all multicasts. */
1376                memset(mc_filter, 0xff, sizeof(mc_filter));
1377                ew32(RxCtrl, 0x000c);
1378        } else if (netdev_mc_empty(dev)) {
1379                ew32(RxCtrl, 0x0004);
1380                return;
1381        } else {                                        /* Never executed, for now. */
1382                struct netdev_hw_addr *ha;
1383
1384                memset(mc_filter, 0, sizeof(mc_filter));
1385                netdev_for_each_mc_addr(ha, dev) {
1386                        unsigned int bit_nr =
1387                                ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1388                        mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1389                }
1390        }
1391        /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1392        if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1393                for (i = 0; i < 4; i++)
1394                        ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1395                memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1396        }
1397}
1398
1399static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1400{
1401        struct epic_private *np = netdev_priv(dev);
1402
1403        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1404        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1405        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1406}
1407
1408static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1409{
1410        struct epic_private *np = netdev_priv(dev);
1411        int rc;
1412
1413        spin_lock_irq(&np->lock);
1414        rc = mii_ethtool_gset(&np->mii, cmd);
1415        spin_unlock_irq(&np->lock);
1416
1417        return rc;
1418}
1419
1420static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1421{
1422        struct epic_private *np = netdev_priv(dev);
1423        int rc;
1424
1425        spin_lock_irq(&np->lock);
1426        rc = mii_ethtool_sset(&np->mii, cmd);
1427        spin_unlock_irq(&np->lock);
1428
1429        return rc;
1430}
1431
1432static int netdev_nway_reset(struct net_device *dev)
1433{
1434        struct epic_private *np = netdev_priv(dev);
1435        return mii_nway_restart(&np->mii);
1436}
1437
1438static u32 netdev_get_link(struct net_device *dev)
1439{
1440        struct epic_private *np = netdev_priv(dev);
1441        return mii_link_ok(&np->mii);
1442}
1443
/* ethtool msglevel maps straight onto the module-wide 'debug' knob. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1448
/* Set the module-wide 'debug' level via ethtool. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1453
/* Called before any ethtool operation: if the interface is down, power
   the chip up so register/PHY accesses work.  Paired with
   ethtool_complete() below. */
static int ethtool_begin(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-up, if interface is down */
	if (!netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}
	return 0;
}
1466
/* Called after an ethtool operation: undo ethtool_begin()'s power-up
   when the interface is down. */
static void ethtool_complete(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-down, if interface is down */
	if (!netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
}
1478
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1490
/* MII ioctls (the SIOC[GS]MIIxxx family).  If the interface is down,
   the chip is powered up around the PHY access and powered back down
   afterwards, mirroring ethtool_begin()/ethtool_complete(). */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
	return rc;
}
1516
1517
1518static void epic_remove_one(struct pci_dev *pdev)
1519{
1520        struct net_device *dev = pci_get_drvdata(pdev);
1521        struct epic_private *ep = netdev_priv(dev);
1522
1523        pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1524        pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1525        unregister_netdev(dev);
1526        pci_iounmap(pdev, ep->ioaddr);
1527        pci_release_regions(pdev);
1528        free_netdev(dev);
1529        pci_disable_device(pdev);
1530        /* pci_power_off(pdev, -1); */
1531}
1532
1533
1534#ifdef CONFIG_PM
1535
/* PM suspend hook: quiesce the chip and drop it into low-power mode.
   Nothing to do if the interface is not running. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}
1550
1551
/* PM resume hook: epic_restart() redoes the full chip init sequence. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1562
1563#endif /* CONFIG_PM */
1564
1565
/* PCI driver glue binding the epic_pci_tbl IDs to the entry points. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1576
1577
/* Module entry point: announce the driver and register with PCI core. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}
1587
1588
/* Module exit point: unregister from the PCI core. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}
1593
1594
1595module_init(epic_init);
1596module_exit(epic_cleanup);
1597