/* linux/drivers/net/ethernet/smsc/epic100.c */
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3        Written/copyright 1997-2001 by Donald Becker.
   4
   5        This software may be used and distributed according to the terms of
   6        the GNU General Public License (GPL), incorporated herein by reference.
   7        Drivers based on or derived from this code fall under the GPL and must
   8        retain the authorship, copyright and license notice.  This file is not
   9        a complete program and may only be used when the entire operating
  10        system is licensed under the GPL.
  11
  12        This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13        SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15        The author may be reached as becker@scyld.com, or C/O
  16        Scyld Computing Corporation
  17        410 Severn Ave., Suite 210
  18        Annapolis MD 21403
  19
  20        Information and updates available at
  21        http://www.scyld.com/network/epic100.html
  22        [this link no longer provides anything useful -jgarzik]
  23
  24        ---------------------------------------------------------------------
  25
  26*/
  27
/* Driver identification strings, used in the module banner and for
   pci_request_regions() resource naming. */
#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
/* Per-board overrides, indexed by probe order; -1 means "not set". */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
  45
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
  56#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct epic_tx_desc)
  57#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct epic_rx_desc)
  58
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <linux/uaccess.h>
  90#include <asm/byteorder.h>
  91
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; permission 0 keeps them out of sysfs (load-time only). */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112                                Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
/* Per-chip capability bits kept in epic_chip_info.drv_flags. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

/* BAR 0 is the I/O-port register window, BAR 1 the memory-mapped one. */
#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

/* Index into pci_id_tbl[], carried in epic_pci_tbl[].driver_data. */
typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


/* PCI IDs this driver binds to (vendor 0x10B8 = SMC). */
static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);

/* Register accessors; a local 'ioaddr' must be in scope at each call site. */
#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Events serviced from NAPI poll context; everything else is handled
   directly in the hard interrupt handler. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* Map dev->if_port media index to an MII BMCR value; 0 = autonegotiate. */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
 225
/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;		/* Ownership/status bits (see DescOwn). */
	u32 bufaddr;		/* DMA address of the packet buffer. */
	u32 buflength;
	u32 next;		/* DMA address of the next descriptor. */
};

struct epic_rx_desc {
	u32 rxstatus;		/* Ownership/status bits (see DescOwn). */
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,		/* Descriptor is owned by the chip. */
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Per-device driver state, stored in the net_device private area. */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
 287
/* Forward declarations of the driver's entry points. */
static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

/* Network stack callbacks installed on the net_device in epic_init_one(). */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout		= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
 318
/*
 * PCI probe: enable the device, map its register window, allocate the
 * coherent Tx/Rx descriptor rings, read the station address from the
 * chip, scan the MII bus for transceivers, and register the net_device.
 * Returns 0 on success or a negative errno; all resources acquired
 * before a failure are released on the goto unwind chain at the bottom.
 */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Counts probed boards to index options[]/full_duplex[].
	   NOTE(review): relies on probes being serialized — verify. */
	static int card_idx = -1;
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* Coherent DMA rings shared with the chip. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: dev->mem_start wins, else module params. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	/* Station address lives in the LAN0..LAN2 registers, 16 bits each. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000/0xffff means no PHY responded at this address. */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

/* Error unwind: release resources in the reverse order of acquisition. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
 510
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	er32(EECTL)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
 533
/* Mask off every chip interrupt source. */
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, 0x00000000);
}

/* Flush a posted MMIO write by reading back; a no-op with I/O port ops. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}

/* Mask the NAPI-handled interrupt sources, leaving the rest armed. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}

/* Re-arm the NAPI-handled interrupt sources after polling completes. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
 565
/* Bit-bang one 16-bit word out of the serial EEPROM at 'location'.
   The command format adapts to the detected EEPROM size (EECTL bit 0x40
   distinguishes 64-word from 256-word parts). */
static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip select to start a new transaction. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock the 16 data bits in, most-significant bit first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
 599
/* MIICtrl operation bits; they double as the busy flags polled below. */
#define MII_READOP		1
#define MII_WRITEOP		2

/* Read an MII management register through the chip's MDIO engine.
   Returns the register value, or 0xffff if the chip never completes. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}

/* Write an MII management register; silently gives up if the chip
   never clears the busy bit. */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}
 640
 641
/*
 * .ndo_open: bring the interface up.  Resets the chip, grabs the (shared)
 * IRQ, builds the descriptor rings, programs the station address, media
 * and thresholds, starts the receiver and arms the media-watch timer.
 * Returns 0 on success or the request_irq() error.
 */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Program the station address into LAN0..LAN2. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* Forced media: write the BMCR directly; otherwise inspect the link
	   partner's abilities to pick the duplex setting. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&ep->timer, epic_timer, 0);
	ep->timer.expires = jiffies + 3*HZ;
	add_timer(&ep->timer);

	return rc;
}
 748
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
/* Quiesce the interface: stop the Tx queue, mask interrupts, halt DMA,
   harvest the hardware error counters and drain the Rx ring. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	/* An all-ones read means the chip is gone (CardBus removal). */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors += er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
 774
/* Re-initialize the chip after a soft reset, resuming the rings at the
   current cur_rx/dirty_tx positions instead of rebuilding them.  Used
   for PCI-error and Tx-timeout recovery; mirrors epic_open()'s setup. */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Select descriptor byteswapping to match host endianness. */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Re-program the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Point the chip at the descriptor where processing left off. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
 824
/* Poll the MII link partner ability register and switch the Tx duplex
 * setting if autonegotiation now resolves differently.  No-op when the
 * media is forced or when the PHY read returns all-ones (bogus/absent).
 */
static void check_media(struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        /* Read LPA only if a PHY was found during probe. */
        int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
        int negotiated = mii_lpa & ep->mii.advertising;
        /* Full duplex if 100baseTx-FD negotiated, or 10baseT-FD is the
           only common mode. */
        int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

        if (ep->mii.force_media)
                return;
        if (mii_lpa == 0xffff)          /* Bogus read */
                return;
        if (ep->mii.full_duplex != duplex) {
                ep->mii.full_duplex = duplex;
                netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
                            ep->mii.full_duplex ? "full" : "half",
                            ep->phys[0], mii_lpa);
                /* Same TxCtrl values as epic_restart(): 0x7F full, 0x79 half. */
                ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
        }
}
 845
 846static void epic_timer(struct timer_list *t)
 847{
 848        struct epic_private *ep = from_timer(ep, t, timer);
 849        struct net_device *dev = ep->mii.dev;
 850        void __iomem *ioaddr = ep->ioaddr;
 851        int next_tick = 5*HZ;
 852
 853        if (debug > 3) {
 854                netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
 855                           er32(TxSTAT));
 856                netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
 857                           er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 858        }
 859
 860        check_media(dev);
 861
 862        ep->timer.expires = jiffies + next_tick;
 863        add_timer(&ep->timer);
 864}
 865
/* ndo_tx_timeout handler: the stack saw no Tx completions for too long.
 * For a plain Tx FIFO underflow we only restart the transmitter;
 * anything else gets a full chip restart, after which the queued
 * descriptors are re-kicked with TxQueued.
 */
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;

        if (debug > 0) {
                netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
                            er16(TxSTAT));
                if (debug > 1) {
                        netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
                                   ep->dirty_tx, ep->cur_tx);
                }
        }
        if (er16(TxSTAT) & 0x10) {              /* Tx FIFO underflow. */
                dev->stats.tx_fifo_errors++;
                ew32(COMMAND, RestartTx);
        } else {
                epic_restart(dev);
                ew32(COMMAND, TxQueued);
        }

        netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        /* Only wake the queue if the ring still has room. */
        if (!ep->tx_full)
                netif_wake_queue(dev);
}
 892
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Resets all ring indices, links the descriptors into circular lists,
 * allocates and DMA-maps one skb per Rx slot, and clears Tx ownership.
 * DescOwn is set on an Rx descriptor only after its buffer is mapped.
 */
static void epic_init_ring(struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        int i;

        ep->tx_full = 0;
        ep->dirty_tx = ep->cur_tx = 0;
        ep->cur_rx = ep->dirty_rx = 0;
        /* Standard buffer for default MTU; oversize with slack otherwise. */
        ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                ep->rx_ring[i].rxstatus = 0;
                ep->rx_ring[i].buflength = ep->rx_buf_sz;
                ep->rx_ring[i].next = ep->rx_ring_dma +
                                      (i+1)*sizeof(struct epic_rx_desc);
                ep->rx_skbuff[i] = NULL;
        }
        /* Mark the last entry as wrapping the ring. */
        ep->rx_ring[i-1].next = ep->rx_ring_dma;

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
                ep->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
                        skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                /* Hand the descriptor to the chip last. */
                ep->rx_ring[i].rxstatus = DescOwn;
        }
        /* Negative (wrapped) when some allocations failed; epic_rx()'s
           refill loop will retry the missing slots. */
        ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* The Tx buffer descriptor is filled in as needed, but we
           do need to clear the ownership bit. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                ep->tx_skbuff[i] = NULL;
                ep->tx_ring[i].txstatus = 0x0000;
                ep->tx_ring[i].next = ep->tx_ring_dma +
                        (i+1)*sizeof(struct epic_tx_desc);
        }
        ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
 938
/* ndo_start_xmit: place one skb on the Tx ring and kick the chip.
 * Serialized against epic_tx() by ep->lock; the descriptor's DescOwn
 * bit is written last so the chip never sees a half-built descriptor.
 */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        int entry, free_count;
        u32 ctrl_word;
        unsigned long flags;

        /* Pad runt frames to the Ethernet minimum. */
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;

        /* Caution: the write order is important here, set the field with the
           "ownership" bit last. */

        /* Calculate the next Tx descriptor entry. */
        spin_lock_irqsave(&ep->lock, flags);
        /* NOTE(review): "free_count" is actually the number of in-flight
           entries (cur_tx - dirty_tx), not free slots. */
        free_count = ep->cur_tx - ep->dirty_tx;
        entry = ep->cur_tx % TX_RING_SIZE;

        ep->tx_skbuff[entry] = skb;
        ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
                                                    skb->len, PCI_DMA_TODEVICE);
        /* Request a Tx-done interrupt only at the queue midpoint and
           when the ring is about to fill, to cut interrupt load. */
        if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
                ctrl_word = 0x100000; /* No interrupt */
        } else if (free_count == TX_QUEUE_LEN/2) {
                ctrl_word = 0x140000; /* Tx-done intr. */
        } else if (free_count < TX_QUEUE_LEN - 1) {
                ctrl_word = 0x100000; /* No Tx-done intr. */
        } else {
                /* Leave room for an additional entry. */
                ctrl_word = 0x140000; /* Tx-done intr. */
                ep->tx_full = 1;
        }
        ep->tx_ring[entry].buflength = ctrl_word | skb->len;
        /* Length in the high half, ownership bit last. */
        ep->tx_ring[entry].txstatus =
                ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
                            | DescOwn;

        ep->cur_tx++;
        if (ep->tx_full)
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&ep->lock, flags);
        /* Trigger an immediate transmit demand. */
        ew32(COMMAND, TxQueued);

        if (debug > 4)
                netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
                           skb->len, entry, ctrl_word, er32(TxSTAT));

        return NETDEV_TX_OK;
}
 991
 992static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
 993                          int status)
 994{
 995        struct net_device_stats *stats = &dev->stats;
 996
 997#ifndef final_version
 998        /* There was an major error, log it. */
 999        if (debug > 1)
1000                netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
1001                           status);
1002#endif
1003        stats->tx_errors++;
1004        if (status & 0x1050)
1005                stats->tx_aborted_errors++;
1006        if (status & 0x0008)
1007                stats->tx_carrier_errors++;
1008        if (status & 0x0040)
1009                stats->tx_window_errors++;
1010        if (status & 0x0010)
1011                stats->tx_fifo_errors++;
1012}
1013
/* Reclaim completed Tx descriptors: account stats (or errors), unmap
 * and free each transmitted skb, then wake the queue once the ring has
 * drained enough.  Runs from NAPI poll context.
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
        unsigned int dirty_tx, cur_tx;

        /*
         * Note: if this lock becomes a problem we can narrow the locked
         * region at the cost of occasionally grabbing the lock more times.
         */
        cur_tx = ep->cur_tx;
        for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
                struct sk_buff *skb;
                int entry = dirty_tx % TX_RING_SIZE;
                int txstatus = ep->tx_ring[entry].txstatus;

                if (txstatus & DescOwn)
                        break;  /* It still hasn't been Txed */

                /* Bit 0 set means the frame went out successfully;
                   bits 11:8 carry the collision count. */
                if (likely(txstatus & 0x0001)) {
                        dev->stats.collisions += (txstatus >> 8) & 15;
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
                } else
                        epic_tx_error(dev, ep, txstatus);

                /* Free the original skb. */
                skb = ep->tx_skbuff[entry];
                pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
                                 skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(skb);
                ep->tx_skbuff[entry] = NULL;
        }

#ifndef final_version
        /* Sanity check: the reclaim pointer must never lap the producer. */
        if (cur_tx - dirty_tx > TX_RING_SIZE) {
                netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                            dirty_tx, cur_tx, ep->tx_full);
                dirty_tx += TX_RING_SIZE;
        }
#endif
        ep->dirty_tx = dirty_tx;
        if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
                /* The ring is no longer full, allow new TX entries. */
                ep->tx_full = 0;
                netif_wake_queue(dev);
        }
}
1060
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Top half: acknowledge interrupt sources, hand the Rx/Tx work to NAPI
 * (epic_poll) by scheduling it with NAPI events masked, and handle the
 * rare error events (counter overflow, Tx underrun, PCI bus error)
 * inline.
 */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        unsigned int handled = 0;
        int status;

        status = er32(INTSTAT);
        /* Acknowledge all of the current interrupt sources ASAP. */
        ew32(INTSTAT, status & EpicNormalEvent);

        if (debug > 4) {
                netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
                           status, er32(INTSTAT));
        }

        /* Not our interrupt: report unhandled so the IRQ can be shared. */
        if ((status & IntrSummary) == 0)
                goto out;

        handled = 1;

        /* Normal Rx/Tx work is deferred to epic_poll(); mask the NAPI
           events while the poll is pending. */
        if (status & EpicNapiEvent) {
                spin_lock(&ep->napi_lock);
                if (napi_schedule_prep(&ep->napi)) {
                        epic_napi_irq_off(dev, ep);
                        __napi_schedule(&ep->napi);
                }
                spin_unlock(&ep->napi_lock);
        }
        status &= ~EpicNapiEvent;

        /* Check uncommon events all at once. */
        if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
                struct net_device_stats *stats = &dev->stats;

                /* All-events pattern means the card was removed. */
                if (status == EpicRemoved)
                        goto out;

                /* Always update the error counts to avoid overhead later. */
                stats->rx_missed_errors += er8(MPCNT);
                stats->rx_frame_errors  += er8(ALICNT);
                stats->rx_crc_errors    += er8(CRCCNT);

                if (status & TxUnderrun) { /* Tx FIFO underflow. */
                        stats->tx_fifo_errors++;
                        /* Raise the threshold before restarting Tx. */
                        ew32(TxThresh, ep->tx_threshold += 128);
                        /* Restart the transmit process. */
                        ew32(COMMAND, RestartTx);
                }
                if (status & PCIBusErr170) {
                        netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
                                   status);
                        epic_pause(dev);
                        epic_restart(dev);
                }
                /* Clear all error sources. */
                ew32(INTSTAT, status & 0x7f18);
        }

out:
        if (debug > 3) {
                netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
                           status);
        }

        return IRQ_RETVAL(handled);
}
1131
/* Receive up to 'budget' frames from the Rx ring.  Small frames (below
 * rx_copybreak) are copied into a fresh skb so the large ring buffer can
 * be reused; larger frames are passed up directly and their ring slot is
 * refilled afterwards.  Returns the number of descriptors processed
 * (received plus refilled), for NAPI accounting.
 */
static int epic_rx(struct net_device *dev, int budget)
{
        struct epic_private *ep = netdev_priv(dev);
        int entry = ep->cur_rx % RX_RING_SIZE;
        /* Never process more slots than are currently filled. */
        int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
        int work_done = 0;

        if (debug > 4)
                netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
                           ep->rx_ring[entry].rxstatus);

        if (rx_work_limit > budget)
                rx_work_limit = budget;

        /* If we own the next entry, it's a new packet. Send it up. */
        while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
                int status = ep->rx_ring[entry].rxstatus;

                if (debug > 4)
                        netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
                                   status);
                if (--rx_work_limit < 0)
                        break;
                /* Error bits: 0x2000 = frame spanned buffers, 0x0006 =
                   frame errors already counted by the hardware. */
                if (status & 0x2006) {
                        if (debug > 2)
                                netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
                                           status);
                        if (status & 0x2000) {
                                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
                                            status);
                                dev->stats.rx_length_errors++;
                        } else if (status & 0x0006)
                                /* Rx Frame errors are counted in hardware. */
                                dev->stats.rx_errors++;
                } else {
                        /* Malloc up new buffer, compatible with net-2e. */
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = (status >> 16) - 4;
                        struct sk_buff *skb;

                        if (pkt_len > PKT_BUF_SZ - 4) {
                                netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
                                           status, pkt_len);
                                /* Clamp to the maximum Ethernet payload. */
                                pkt_len = 1514;
                        }
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                /* Sync for CPU access, copy, then hand the
                                   buffer back to the device. */
                                pci_dma_sync_single_for_cpu(ep->pci_dev,
                                                            ep->rx_ring[entry].bufaddr,
                                                            ep->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
                                skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(ep->pci_dev,
                                                               ep->rx_ring[entry].bufaddr,
                                                               ep->rx_buf_sz,
                                                               PCI_DMA_FROMDEVICE);
                        } else {
                                /* Pass the ring skb up; the slot is left
                                   empty for the refill loop below. */
                                pci_unmap_single(ep->pci_dev,
                                        ep->rx_ring[entry].bufaddr,
                                        ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                                skb_put(skb = ep->rx_skbuff[entry], pkt_len);
                                ep->rx_skbuff[entry] = NULL;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_receive_skb(skb);
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                work_done++;
                entry = (++ep->cur_rx) % RX_RING_SIZE;
        }

        /* Refill the Rx ring buffers. */
        for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
                entry = ep->dirty_rx % RX_RING_SIZE;
                if (ep->rx_skbuff[entry] == NULL) {
                        struct sk_buff *skb;
                        skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
                        if (skb == NULL)
                                break;
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
                                skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        work_done++;
                }
                /* AV: shouldn't we add a barrier here? */
                /* NOTE(review): a wmb() before handing the descriptor
                   back would guarantee bufaddr is visible first --
                   confirm against the DMA ordering rules. */
                ep->rx_ring[entry].rxstatus = DescOwn;
        }
        return work_done;
}
1226
1227static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1228{
1229        void __iomem *ioaddr = ep->ioaddr;
1230        int status;
1231
1232        status = er32(INTSTAT);
1233
1234        if (status == EpicRemoved)
1235                return;
1236        if (status & RxOverflow)        /* Missed a Rx frame. */
1237                dev->stats.rx_errors++;
1238        if (status & (RxOverflow | RxFull))
1239                ew16(COMMAND, RxQueued);
1240}
1241
/* NAPI poll callback: reclaim Tx, receive up to 'budget' frames, handle
 * Rx errors, and -- only when the budget was not exhausted -- complete
 * NAPI and re-enable the chip's NAPI interrupt sources.
 */
static int epic_poll(struct napi_struct *napi, int budget)
{
        struct epic_private *ep = container_of(napi, struct epic_private, napi);
        struct net_device *dev = ep->mii.dev;
        void __iomem *ioaddr = ep->ioaddr;
        int work_done;

        epic_tx(dev, ep);

        work_done = epic_rx(dev, budget);

        epic_rx_err(dev, ep);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                unsigned long flags;

                /* Ack pending NAPI events and unmask them atomically
                   with respect to the interrupt handler. */
                spin_lock_irqsave(&ep->napi_lock, flags);

                ew32(INTSTAT, EpicNapiEvent);
                epic_napi_irq_on(dev, ep);
                spin_unlock_irqrestore(&ep->napi_lock, flags);
        }

        return work_done;
}
1267
/* ndo_stop: quiesce NAPI and the timer, disable and release the IRQ,
 * stop the chip, free every ring buffer, and leave the chip in
 * low-power mode.  Always returns 0.
 */
static int epic_close(struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        struct pci_dev *pdev = ep->pci_dev;
        void __iomem *ioaddr = ep->ioaddr;
        struct sk_buff *skb;
        int i;

        netif_stop_queue(dev);
        napi_disable(&ep->napi);

        if (debug > 1)
                netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
                           er32(INTSTAT));

        del_timer_sync(&ep->timer);

        epic_disable_int(dev, ep);

        free_irq(pdev->irq, dev);

        epic_pause(dev);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                skb = ep->rx_skbuff[i];
                ep->rx_skbuff[i] = NULL;
                ep->rx_ring[i].rxstatus = 0;            /* Not owned by Epic chip. */
                ep->rx_ring[i].buflength = 0;
                if (skb) {
                        pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
                                         ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(skb);
                }
                ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
        }
        /* Free any skbs still waiting on the Tx ring. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = ep->tx_skbuff[i];
                ep->tx_skbuff[i] = NULL;
                if (!skb)
                        continue;
                pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb(skb);
        }

        /* Green! Leave the chip in low-power mode. */
        ew32(GENCTL, 0x0008);

        return 0;
}
1319
1320static struct net_device_stats *epic_get_stats(struct net_device *dev)
1321{
1322        struct epic_private *ep = netdev_priv(dev);
1323        void __iomem *ioaddr = ep->ioaddr;
1324
1325        if (netif_running(dev)) {
1326                struct net_device_stats *stats = &dev->stats;
1327
1328                stats->rx_missed_errors += er8(MPCNT);
1329                stats->rx_frame_errors  += er8(ALICNT);
1330                stats->rx_crc_errors    += er8(CRCCNT);
1331        }
1332
1333        return &dev->stats;
1334}
1335
1336/* Set or clear the multicast filter for this adaptor.
1337   Note that we only use exclusion around actually queueing the
1338   new frame, not around filling ep->setup_frame.  This is non-deterministic
1339   when re-entered but still correct. */
1340
1341static void set_rx_mode(struct net_device *dev)
1342{
1343        struct epic_private *ep = netdev_priv(dev);
1344        void __iomem *ioaddr = ep->ioaddr;
1345        unsigned char mc_filter[8];              /* Multicast hash filter */
1346        int i;
1347
1348        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1349                ew32(RxCtrl, 0x002c);
1350                /* Unconditionally log net taps. */
1351                memset(mc_filter, 0xff, sizeof(mc_filter));
1352        } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1353                /* There is apparently a chip bug, so the multicast filter
1354                   is never enabled. */
1355                /* Too many to filter perfectly -- accept all multicasts. */
1356                memset(mc_filter, 0xff, sizeof(mc_filter));
1357                ew32(RxCtrl, 0x000c);
1358        } else if (netdev_mc_empty(dev)) {
1359                ew32(RxCtrl, 0x0004);
1360                return;
1361        } else {                                        /* Never executed, for now. */
1362                struct netdev_hw_addr *ha;
1363
1364                memset(mc_filter, 0, sizeof(mc_filter));
1365                netdev_for_each_mc_addr(ha, dev) {
1366                        unsigned int bit_nr =
1367                                ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1368                        mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1369                }
1370        }
1371        /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1372        if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1373                for (i = 0; i < 4; i++)
1374                        ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1375                memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1376        }
1377}
1378
1379static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1380{
1381        struct epic_private *np = netdev_priv(dev);
1382
1383        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1384        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1385        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1386}
1387
1388static int netdev_get_link_ksettings(struct net_device *dev,
1389                                     struct ethtool_link_ksettings *cmd)
1390{
1391        struct epic_private *np = netdev_priv(dev);
1392
1393        spin_lock_irq(&np->lock);
1394        mii_ethtool_get_link_ksettings(&np->mii, cmd);
1395        spin_unlock_irq(&np->lock);
1396
1397        return 0;
1398}
1399
1400static int netdev_set_link_ksettings(struct net_device *dev,
1401                                     const struct ethtool_link_ksettings *cmd)
1402{
1403        struct epic_private *np = netdev_priv(dev);
1404        int rc;
1405
1406        spin_lock_irq(&np->lock);
1407        rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1408        spin_unlock_irq(&np->lock);
1409
1410        return rc;
1411}
1412
1413static int netdev_nway_reset(struct net_device *dev)
1414{
1415        struct epic_private *np = netdev_priv(dev);
1416        return mii_nway_restart(&np->mii);
1417}
1418
1419static u32 netdev_get_link(struct net_device *dev)
1420{
1421        struct epic_private *np = netdev_priv(dev);
1422        return mii_link_ok(&np->mii);
1423}
1424
1425static u32 netdev_get_msglevel(struct net_device *dev)
1426{
1427        return debug;
1428}
1429
1430static void netdev_set_msglevel(struct net_device *dev, u32 value)
1431{
1432        debug = value;
1433}
1434
1435static int ethtool_begin(struct net_device *dev)
1436{
1437        struct epic_private *ep = netdev_priv(dev);
1438        void __iomem *ioaddr = ep->ioaddr;
1439
1440        /* power-up, if interface is down */
1441        if (!netif_running(dev)) {
1442                ew32(GENCTL, 0x0200);
1443                ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1444        }
1445        return 0;
1446}
1447
1448static void ethtool_complete(struct net_device *dev)
1449{
1450        struct epic_private *ep = netdev_priv(dev);
1451        void __iomem *ioaddr = ep->ioaddr;
1452
1453        /* power-down, if interface is down */
1454        if (!netif_running(dev)) {
1455                ew32(GENCTL, 0x0008);
1456                ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1457        }
1458}
1459
/* ethtool entry points; begin/complete bracket every operation so a
 * down interface is powered up for the duration of the call. */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
        .begin                  = ethtool_begin,
        .complete               = ethtool_complete,
        .get_link_ksettings     = netdev_get_link_ksettings,
        .set_link_ksettings     = netdev_set_link_ksettings,
};
1471
1472static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1473{
1474        struct epic_private *np = netdev_priv(dev);
1475        void __iomem *ioaddr = np->ioaddr;
1476        struct mii_ioctl_data *data = if_mii(rq);
1477        int rc;
1478
1479        /* power-up, if interface is down */
1480        if (! netif_running(dev)) {
1481                ew32(GENCTL, 0x0200);
1482                ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1483        }
1484
1485        /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1486        spin_lock_irq(&np->lock);
1487        rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1488        spin_unlock_irq(&np->lock);
1489
1490        /* power-down, if interface is down */
1491        if (! netif_running(dev)) {
1492                ew32(GENCTL, 0x0008);
1493                ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1494        }
1495        return rc;
1496}
1497
1498
1499static void epic_remove_one(struct pci_dev *pdev)
1500{
1501        struct net_device *dev = pci_get_drvdata(pdev);
1502        struct epic_private *ep = netdev_priv(dev);
1503
1504        pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1505        pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1506        unregister_netdev(dev);
1507        pci_iounmap(pdev, ep->ioaddr);
1508        pci_release_regions(pdev);
1509        free_netdev(dev);
1510        pci_disable_device(pdev);
1511        /* pci_power_off(pdev, -1); */
1512}
1513
1514
1515#ifdef CONFIG_PM
1516
/* Legacy PCI suspend hook: quiesce the chip (epic_pause) and drop it
 * into low-power mode.  Nothing to do if the interface is down, since
 * a closed device is already powered down. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;

        if (!netif_running(dev))
                return 0;
        epic_pause(dev);
        /* Put the chip into low-power mode. */
        ew32(GENCTL, 0x0008);
        /* pci_power_off(pdev, -1); */
        return 0;
}
1531
1532
/* Legacy PCI resume hook: re-initialize the chip, but only if the
 * interface was up when we suspended. */
static int epic_resume (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (netif_running(dev))
                epic_restart(dev);
        /* pci_power_on(pdev); */
        return 0;
}
1543
1544#endif /* CONFIG_PM */
1545
1546
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver epic_driver = {
        .name           = DRV_NAME,
        .id_table       = epic_pci_tbl,
        .probe          = epic_init_one,
        .remove         = epic_remove_one,
#ifdef CONFIG_PM
        .suspend        = epic_suspend,
        .resume         = epic_resume,
#endif /* CONFIG_PM */
};
1557
1558
/* Module entry point: announce the driver (module builds only) and
 * register with the PCI core. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
        pr_info("%s%s\n", version, version2);
#endif

        return pci_register_driver(&epic_driver);
}
1568
1569
1570static void __exit epic_cleanup (void)
1571{
1572        pci_unregister_driver (&epic_driver);
1573}
1574
1575
/* Hook the init/exit functions into module load and unload. */
module_init(epic_init);
module_exit(epic_cleanup);
1578