linux/drivers/net/lance.c
   1/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
   2/*
   3        Written/copyright 1993-1998 by Donald Becker.
   4
   5        Copyright 1993 United States Government as represented by the
   6        Director, National Security Agency.
   7        This software may be used and distributed according to the terms
   8        of the GNU General Public License, incorporated herein by reference.
   9
  10        This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11        with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
  12
  13        The author may be reached as becker@scyld.com, or C/O
  14        Scyld Computing Corporation
  15        410 Severn Ave., Suite 210
  16        Annapolis MD 21403
  17
  18        Andrey V. Savochkin:
  19        - alignment problem with 1.3.* kernel and some minor changes.
  20        Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
  21        - added support for Linux/Alpha, but removed most of it, because
  22        it worked only for the PCI chip.
  23      - added hook for the 32bit lance driver
  24      - added PCnetPCI II (79C970A) to chip table
  25        Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
  26        - hopefully fix above so Linux/Alpha can use ISA cards too.
  27    8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
  28    v1.12 10/27/97 Module support -djb
  29    v1.14  2/3/98 Module support modified, made PCI support optional -djb
  30    v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
  31                  before unregister_netdev() which caused NULL pointer
  32                  reference later in the chain (in rtnetlink_fill_ifinfo())
  33                  -- Mika Kuoppala <miku@iki.fi>
  34
  35    Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
  36    the 2.1 version of the old driver - Alan Cox
  37
  38    Get rid of check_region, check kmalloc return in lance_probe1
  39    Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
  40
  41        Reworked detection, added support for Racal InterLan EtherBlaster cards
  42        Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
  43*/
  44
  45static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
  46
  47#include <linux/module.h>
  48#include <linux/kernel.h>
  49#include <linux/string.h>
  50#include <linux/delay.h>
  51#include <linux/errno.h>
  52#include <linux/ioport.h>
  53#include <linux/slab.h>
  54#include <linux/interrupt.h>
  55#include <linux/pci.h>
  56#include <linux/init.h>
  57#include <linux/netdevice.h>
  58#include <linux/etherdevice.h>
  59#include <linux/skbuff.h>
  60#include <linux/mm.h>
  61#include <linux/bitops.h>
  62
  63#include <asm/io.h>
  64#include <asm/dma.h>
  65
  66static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
  67static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
  68static int __init do_lance_probe(struct net_device *dev);
  69
  70
  71static struct card {
  72        char id_offset14;
  73        char id_offset15;
  74} cards[] = {
  75        {       //"normal"
  76                .id_offset14 = 0x57,
  77                .id_offset15 = 0x57,
  78        },
  79        {       //NI6510EB
  80                .id_offset14 = 0x52,
  81                .id_offset15 = 0x44,
  82        },
  83        {       //Racal InterLan EtherBlaster
  84                .id_offset14 = 0x52,
  85                .id_offset15 = 0x49,
  86        },
  87};
  88#define NUM_CARDS 3
  89
  90#ifdef LANCE_DEBUG
  91static int lance_debug = LANCE_DEBUG;
  92#else
  93static int lance_debug = 1;
  94#endif
  95
  96/*
  97                                Theory of Operation
  98
  99I. Board Compatibility
 100
 101This device driver is designed for the AMD 79C960, the "PCnet-ISA
 102single-chip ethernet controller for ISA".  This chip is used in a wide
 103variety of boards from vendors such as Allied Telesis, HP, Kingston,
 104and Boca.  This driver is also intended to work with older AMD 7990
 105designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
 106I use the name LANCE to refer to all of the AMD chips, even though it properly
 107refers only to the original 7990.
 108
 109II. Board-specific settings
 110
  111The driver is designed to work with boards that use the faster
  112bus-master mode, rather than the shared memory mode.  (Only older designs
  113have the on-board buffer memory needed to support the slower shared memory mode.)
 114
 115Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
 116channel.  This driver probes the likely base addresses:
 117{0x300, 0x320, 0x340, 0x360}.
 118After the board is found it generates a DMA-timeout interrupt and uses
 119autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
 120of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
 121probed for by enabling each free DMA channel in turn and checking if
 122initialization succeeds.
 123
 124The HP-J2405A board is an exception: with this board it is easy to read the
 125EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
 126_know_ the base address -- that field is for writing the EEPROM.)
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
 132the base and length of the data buffer, along with status bits.  The length
 133of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
 134the buffer length (rather than being directly the buffer length) for
  135implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads to
  136ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
 137needlessly uses extra space and reduces the chance that an upper layer will
 138be able to reorder queued Tx packets based on priority.  Decreasing the number
 139of entries makes it more difficult to achieve back-to-back packet transmission
 140and increases the chance that Rx ring will overflow.  (Consider the worst case
 141of receiving back-to-back minimum-sized packets.)
 142
 143The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
 144statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
 145avoid the administrative overhead. For the Rx side this avoids dynamically
 146allocating full-sized buffers "just in case", at the expense of a
 147memory-to-memory data copy for each packet received.  For most systems this
 148is a good tradeoff: the Rx buffer will always be in low memory, the copy
 149is inexpensive, and it primes the cache for later packet processing.  For Tx
 150the buffers are only used when needed as low-memory bounce buffers.
 151
 152IIIB. 16M memory limitations.
 153For the ISA bus master mode all structures used directly by the LANCE,
 154the initialization block, Rx and Tx rings, and data buffers, must be
 155accessible from the ISA bus, i.e. in the lower 16M of real memory.
 156This is a problem for current Linux kernels on >16M machines. The network
 157devices are initialized after memory initialization, and the kernel doles out
 158memory from the top of memory downward.  The current solution is to have a
 159special network initialization routine that's called before memory
 160initialization; this will eventually be generalized for all network devices.
 161As mentioned before, low-memory "bounce-buffers" are used when needed.
 162
 163IIIC. Synchronization
 164The driver runs as two independent, single-threaded flows of control.  One
 165is the send-packet routine, which enforces single-threaded use by the
 166dev->tbusy flag.  The other thread is the interrupt handler, which is single
 167threaded by the hardware and other software.
 168
 169The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 170flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  171queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 172the 'lp->tx_full' flag.
 173
 174The interrupt handler has exclusive control over the Rx ring and records stats
 175from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
 176we can't avoid the interrupt overhead by having the Tx routine reap the Tx
 177stats.)  After reaping the stats, it marks the queue entry as empty by setting
 178the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
 179tx_full and tbusy flags.
 180
 181*/
 182
 183/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
  184   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
  185   That translates to 4 and 4 (16 == 2^4).
 186   This is a compile-time option for efficiency.
 187   */
 188#ifndef LANCE_LOG_TX_BUFFERS
 189#define LANCE_LOG_TX_BUFFERS 4
 190#define LANCE_LOG_RX_BUFFERS 4
 191#endif
 192
 193#define TX_RING_SIZE                    (1 << (LANCE_LOG_TX_BUFFERS))
 194#define TX_RING_MOD_MASK                (TX_RING_SIZE - 1)
 195#define TX_RING_LEN_BITS                ((LANCE_LOG_TX_BUFFERS) << 29)
 196
 197#define RX_RING_SIZE                    (1 << (LANCE_LOG_RX_BUFFERS))
 198#define RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 199#define RX_RING_LEN_BITS                ((LANCE_LOG_RX_BUFFERS) << 29)
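
/* Worked example of the sizing above: with LANCE_LOG_TX_BUFFERS == 4,
   TX_RING_SIZE is 1 << 4 == 16 entries, TX_RING_MOD_MASK is 15, and
   TX_RING_LEN_BITS is 4 << 29, i.e. the ring-length code lands in bits
   29-31 of the init-block ring word, above the 24-bit ISA bus address in
   bits 0-23.  Assuming the usual 3-bit TLEN/RLEN fields of the LANCE
   family, the log2 values must stay in the range 0..7: */
#if (LANCE_LOG_TX_BUFFERS > 7) || (LANCE_LOG_RX_BUFFERS > 7)
#error "LANCE ring length fields are only 3 bits wide (max 128 entries)"
#endif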
 200
 201#define PKT_BUF_SZ              1544
 202
 203/* Offsets from base I/O address. */
 204#define LANCE_DATA 0x10
 205#define LANCE_ADDR 0x12
 206#define LANCE_RESET 0x14
 207#define LANCE_BUS_IF 0x16
 208#define LANCE_TOTAL_SIZE 0x18
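
/* CSR0 bits used throughout this file (per the Am7990/PCnet data sheets;
   listed here as a reading aid for the magic constants below):
   0x8000 ERR, 0x4000 BABL, 0x2000 CERR, 0x1000 MISS, 0x0800 MERR,
   0x0400 RINT, 0x0200 TINT, 0x0100 IDON, 0x0080 INTR, 0x0040 INEA,
   0x0020 RXON, 0x0010 TXON, 0x0008 TDMD, 0x0004 STOP, 0x0002 STRT,
   0x0001 INIT.  So, for example, the 0x0043 written by lance_restart()
   is INEA|STRT|INIT, and the 0x0048 in lance_start_xmit() is INEA|TDMD. */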
 209
 210#define TX_TIMEOUT      20
 211
 212/* The LANCE Rx and Tx ring descriptors. */
 213struct lance_rx_head {
 214        s32 base;
 215        s16 buf_length;                 /* This length is 2s complement (negative)! */
 216        s16 msg_length;                 /* This length is "normal". */
 217};
 218
 219struct lance_tx_head {
 220        s32 base;
 221        s16 length;                             /* Length is 2s complement (negative)! */
 222        s16 misc;
 223};
 224
 225/* The LANCE initialization block, described in databook. */
 226struct lance_init_block {
 227        u16 mode;               /* Pre-set mode (reg. 15) */
 228        u8  phys_addr[6]; /* Physical ethernet address */
 229        u32 filter[2];                  /* Multicast filter (unused). */
 230        /* Receive and transmit ring base, along with extra bits. */
 231        u32  rx_ring;                   /* Tx and Rx ring base pointers */
 232        u32  tx_ring;
 233};
 234
 235struct lance_private {
 236        /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
 237        struct lance_rx_head rx_ring[RX_RING_SIZE];
 238        struct lance_tx_head tx_ring[TX_RING_SIZE];
 239        struct lance_init_block init_block;
 240        const char *name;
 241        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
 242        struct sk_buff* tx_skbuff[TX_RING_SIZE];
 243        /* The addresses of receive-in-place skbuffs. */
 244        struct sk_buff* rx_skbuff[RX_RING_SIZE];
 245        unsigned long rx_buffs;         /* Address of Rx and Tx buffers. */
 246        /* Tx low-memory "bounce buffer" address. */
 247        char (*tx_bounce_buffs)[PKT_BUF_SZ];
 248        int cur_rx, cur_tx;                     /* The next free ring entry */
 249        int dirty_rx, dirty_tx;         /* The ring entries to be free()ed. */
 250        int dma;
 251        struct net_device_stats stats;
 252        unsigned char chip_version;     /* See lance_chip_type. */
 253        spinlock_t devlock;
 254};
 255
 256#define LANCE_MUST_PAD          0x00000001
 257#define LANCE_ENABLE_AUTOSELECT 0x00000002
 258#define LANCE_MUST_REINIT_RING  0x00000004
 259#define LANCE_MUST_UNRESET      0x00000008
 260#define LANCE_HAS_MISSED_FRAME  0x00000010
 261
 262/* A mapping from the chip ID number to the part number and features.
 263   These are from the datasheets -- in real life the '970 version
 264   reportedly has the same ID as the '965. */
 265static struct lance_chip_type {
 266        int id_number;
 267        const char *name;
 268        int flags;
 269} chip_table[] = {
 270        {0x0000, "LANCE 7990",                          /* Ancient lance chip.  */
 271                LANCE_MUST_PAD + LANCE_MUST_UNRESET},
 272        {0x0003, "PCnet/ISA 79C960",            /* 79C960 PCnet/ISA.  */
 273                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 274                        LANCE_HAS_MISSED_FRAME},
 275        {0x2260, "PCnet/ISA+ 79C961",           /* 79C961 PCnet/ISA+, Plug-n-Play.  */
 276                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 277                        LANCE_HAS_MISSED_FRAME},
 278        {0x2420, "PCnet/PCI 79C970",            /* 79C970 or 79C974 PCnet-SCSI, PCI. */
 279                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 280                        LANCE_HAS_MISSED_FRAME},
 281        /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
 282                it the PCnet32. */
 283        {0x2430, "PCnet32",                                     /* 79C965 PCnet for VL bus. */
 284                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 285                        LANCE_HAS_MISSED_FRAME},
  286        {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCnet/PCI II. */
 287                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 288                        LANCE_HAS_MISSED_FRAME},
 289        {0x0,    "PCnet (unknown)",
 290                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 291                        LANCE_HAS_MISSED_FRAME},
 292};
 293
 294enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
 295
 296
 297/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
 298   Assume yes until we know the memory size. */
 299static unsigned char lance_need_isa_bounce_buffers = 1;
 300
 301static int lance_open(struct net_device *dev);
 302static void lance_init_ring(struct net_device *dev, gfp_t mode);
 303static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 304                                    struct net_device *dev);
 305static int lance_rx(struct net_device *dev);
 306static irqreturn_t lance_interrupt(int irq, void *dev_id);
 307static int lance_close(struct net_device *dev);
 308static struct net_device_stats *lance_get_stats(struct net_device *dev);
 309static void set_multicast_list(struct net_device *dev);
 310static void lance_tx_timeout (struct net_device *dev);
 311
 312
 313
 314#ifdef MODULE
 315#define MAX_CARDS               8       /* Max number of interfaces (cards) per module */
 316
 317static struct net_device *dev_lance[MAX_CARDS];
 318static int io[MAX_CARDS];
 319static int dma[MAX_CARDS];
 320static int irq[MAX_CARDS];
 321
 322module_param_array(io, int, NULL, 0);
 323module_param_array(dma, int, NULL, 0);
 324module_param_array(irq, int, NULL, 0);
 325module_param(lance_debug, int, 0);
  326MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
 327MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
 328MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
 329MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
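
/* Typical modular use (illustrative values; one comma-separated entry per
   card, "io=" is mandatory, an irq/dma of 0 is probed where the hardware
   allows it):
       insmod lance io=0x300,0x320 irq=5,10 dma=5,6
 */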
 330
 331int __init init_module(void)
 332{
 333        struct net_device *dev;
 334        int this_dev, found = 0;
 335
 336        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 337                if (io[this_dev] == 0)  {
 338                        if (this_dev != 0) /* only complain once */
 339                                break;
 340                        printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
 341                        return -EPERM;
 342                }
 343                dev = alloc_etherdev(0);
 344                if (!dev)
 345                        break;
 346                dev->irq = irq[this_dev];
 347                dev->base_addr = io[this_dev];
 348                dev->dma = dma[this_dev];
 349                if (do_lance_probe(dev) == 0) {
 350                        dev_lance[found++] = dev;
 351                        continue;
 352                }
 353                free_netdev(dev);
 354                break;
 355        }
 356        if (found != 0)
 357                return 0;
 358        return -ENXIO;
 359}
 360
 361static void cleanup_card(struct net_device *dev)
 362{
 363        struct lance_private *lp = dev->ml_priv;
 364        if (dev->dma != 4)
 365                free_dma(dev->dma);
 366        release_region(dev->base_addr, LANCE_TOTAL_SIZE);
 367        kfree(lp->tx_bounce_buffs);
 368        kfree((void*)lp->rx_buffs);
 369        kfree(lp);
 370}
 371
 372void __exit cleanup_module(void)
 373{
 374        int this_dev;
 375
 376        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 377                struct net_device *dev = dev_lance[this_dev];
 378                if (dev) {
 379                        unregister_netdev(dev);
 380                        cleanup_card(dev);
 381                        free_netdev(dev);
 382                }
 383        }
 384}
 385#endif /* MODULE */
 386MODULE_LICENSE("GPL");
 387
 388
 389/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
 390   board probes now that kmalloc() can allocate ISA DMA-able regions.
 391   This also allows the LANCE driver to be used as a module.
 392   */
 393static int __init do_lance_probe(struct net_device *dev)
 394{
 395        unsigned int *port;
 396        int result;
 397
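        /* If all RAM sits below the 16 MB ISA DMA limit, the low-memory
           Tx bounce buffers are never needed. */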
 398        if (high_memory <= phys_to_virt(16*1024*1024))
 399                lance_need_isa_bounce_buffers = 0;
 400
 401        for (port = lance_portlist; *port; port++) {
 402                int ioaddr = *port;
 403                struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
 404                                                        "lance-probe");
 405
 406                if (r) {
 407                        /* Detect the card with minimal I/O reads */
 408                        char offset14 = inb(ioaddr + 14);
 409                        int card;
 410                        for (card = 0; card < NUM_CARDS; ++card)
 411                                if (cards[card].id_offset14 == offset14)
 412                                        break;
 413                        if (card < NUM_CARDS) {/*yes, the first byte matches*/
 414                                char offset15 = inb(ioaddr + 15);
 415                                for (card = 0; card < NUM_CARDS; ++card)
 416                                        if ((cards[card].id_offset14 == offset14) &&
 417                                                (cards[card].id_offset15 == offset15))
 418                                                break;
 419                        }
 420                        if (card < NUM_CARDS) { /*Signature OK*/
 421                                result = lance_probe1(dev, ioaddr, 0, 0);
 422                                if (!result) {
 423                                        struct lance_private *lp = dev->ml_priv;
 424                                        int ver = lp->chip_version;
 425
 426                                        r->name = chip_table[ver].name;
 427                                        return 0;
 428                                }
 429                        }
 430                        release_region(ioaddr, LANCE_TOTAL_SIZE);
 431                }
 432        }
 433        return -ENODEV;
 434}
 435
 436#ifndef MODULE
 437struct net_device * __init lance_probe(int unit)
 438{
 439        struct net_device *dev = alloc_etherdev(0);
 440        int err;
 441
 442        if (!dev)
 443                return ERR_PTR(-ENODEV);
 444
 445        sprintf(dev->name, "eth%d", unit);
 446        netdev_boot_setup_check(dev);
 447
 448        err = do_lance_probe(dev);
 449        if (err)
 450                goto out;
 451        return dev;
 452out:
 453        free_netdev(dev);
 454        return ERR_PTR(err);
 455}
 456#endif
 457
 458static const struct net_device_ops lance_netdev_ops = {
 459        .ndo_open               = lance_open,
 460        .ndo_start_xmit         = lance_start_xmit,
 461        .ndo_stop               = lance_close,
 462        .ndo_get_stats          = lance_get_stats,
 463        .ndo_set_multicast_list = set_multicast_list,
 464        .ndo_tx_timeout         = lance_tx_timeout,
 465        .ndo_change_mtu         = eth_change_mtu,
 466        .ndo_set_mac_address    = eth_mac_addr,
 467        .ndo_validate_addr      = eth_validate_addr,
 468};
 469
 470static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
 471{
 472        struct lance_private *lp;
 473        unsigned long dma_channels;     /* Mark spuriously-busy DMA channels */
 474        int i, reset_val, lance_version;
 475        const char *chipname;
 476        /* Flags for specific chips or boards. */
 477        unsigned char hpJ2405A = 0;     /* HP ISA adaptor */
 478        int hp_builtin = 0;             /* HP on-board ethernet. */
 479        static int did_version;         /* Already printed version info. */
 480        unsigned long flags;
 481        int err = -ENOMEM;
 482        void __iomem *bios;
 483
 484        /* First we look for special cases.
 485           Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
 486           There are two HP versions, check the BIOS for the configuration port.
 487           This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
 488           */
 489        bios = ioremap(0xf00f0, 0x14);
 490        if (!bios)
 491                return -ENOMEM;
 492        if (readw(bios + 0x12) == 0x5048)  {
 493                static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
 494                int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
 495                /* We can have boards other than the built-in!  Verify this is on-board. */
 496                if ((inb(hp_port) & 0xc0) == 0x80
 497                        && ioaddr_table[inb(hp_port) & 3] == ioaddr)
 498                        hp_builtin = hp_port;
 499        }
 500        iounmap(bios);
 501        /* We also recognize the HP Vectra on-board here, but check below. */
 502        hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
 503                                && inb(ioaddr+2) == 0x09);
 504
 505        /* Reset the LANCE.      */
 506        reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
 507
  508        /* The Un-Reset is only needed for the real NE2100, and will
 509           confuse the HP board. */
 510        if (!hpJ2405A)
 511                outw(reset_val, ioaddr+LANCE_RESET);
 512
 513        outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
 514        if (inw(ioaddr+LANCE_DATA) != 0x0004)
 515                return -ENODEV;
 516
 517        /* Get the version of the chip. */
 518        outw(88, ioaddr+LANCE_ADDR);
 519        if (inw(ioaddr+LANCE_ADDR) != 88) {
 520                lance_version = 0;
 521        } else {                        /* Good, it's a newer chip. */
 522                int chip_version = inw(ioaddr+LANCE_DATA);
 523                outw(89, ioaddr+LANCE_ADDR);
 524                chip_version |= inw(ioaddr+LANCE_DATA) << 16;
 525                if (lance_debug > 2)
 526                        printk("  LANCE chip version is %#x.\n", chip_version);
 527                if ((chip_version & 0xfff) != 0x003)
 528                        return -ENODEV;
 529                chip_version = (chip_version >> 12) & 0xffff;
 530                for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
 531                        if (chip_table[lance_version].id_number == chip_version)
 532                                break;
 533                }
 534        }
 535
 536        /* We can't allocate private data from alloc_etherdev() because it must
  537           be in an ISA DMA-able region. */
 538        chipname = chip_table[lance_version].name;
 539        printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
 540
 541        /* There is a 16 byte station address PROM at the base address.
 542           The first six bytes are the station address. */
 543        for (i = 0; i < 6; i++)
 544                dev->dev_addr[i] = inb(ioaddr + i);
 545        printk("%pM", dev->dev_addr);
 546
 547        dev->base_addr = ioaddr;
 548        /* Make certain the data structures used by the LANCE are aligned and DMAble. */
 549
 550        lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
  551        if (lp == NULL)
  552                return -ENOMEM;
 553        if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
 554        dev->ml_priv = lp;
 555        lp->name = chipname;
 556        lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
 557                                                  GFP_DMA | GFP_KERNEL);
 558        if (!lp->rx_buffs)
 559                goto out_lp;
 560        if (lance_need_isa_bounce_buffers) {
 561                lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
 562                                                  GFP_DMA | GFP_KERNEL);
 563                if (!lp->tx_bounce_buffs)
 564                        goto out_rx;
 565        } else
 566                lp->tx_bounce_buffs = NULL;
 567
 568        lp->chip_version = lance_version;
 569        spin_lock_init(&lp->devlock);
 570
 571        lp->init_block.mode = 0x0003;           /* Disable Rx and Tx. */
 572        for (i = 0; i < 6; i++)
 573                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 574        lp->init_block.filter[0] = 0x00000000;
 575        lp->init_block.filter[1] = 0x00000000;
 576        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 577        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 578
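        /* Hand the 24-bit ISA bus address of the init block to the chip:
           low 16 bits into CSR1, the high bits into CSR2, then leave CSR0
           selected. */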
 579        outw(0x0001, ioaddr+LANCE_ADDR);
 580        inw(ioaddr+LANCE_ADDR);
 581        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 582        outw(0x0002, ioaddr+LANCE_ADDR);
 583        inw(ioaddr+LANCE_ADDR);
 584        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 585        outw(0x0000, ioaddr+LANCE_ADDR);
 586        inw(ioaddr+LANCE_ADDR);
 587
 588        if (irq) {                                      /* Set iff PCI card. */
 589                dev->dma = 4;                   /* Native bus-master, no DMA channel needed. */
 590                dev->irq = irq;
 591        } else if (hp_builtin) {
 592                static const char dma_tbl[4] = {3, 5, 6, 0};
 593                static const char irq_tbl[4] = {3, 4, 5, 9};
 594                unsigned char port_val = inb(hp_builtin);
 595                dev->dma = dma_tbl[(port_val >> 4) & 3];
 596                dev->irq = irq_tbl[(port_val >> 2) & 3];
 597                printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
 598        } else if (hpJ2405A) {
 599                static const char dma_tbl[4] = {3, 5, 6, 7};
 600                static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
 601                short reset_val = inw(ioaddr+LANCE_RESET);
 602                dev->dma = dma_tbl[(reset_val >> 2) & 3];
 603                dev->irq = irq_tbl[(reset_val >> 4) & 7];
 604                printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
 605        } else if (lance_version == PCNET_ISAP) {               /* The plug-n-play version. */
 606                short bus_info;
 607                outw(8, ioaddr+LANCE_ADDR);
 608                bus_info = inw(ioaddr+LANCE_BUS_IF);
 609                dev->dma = bus_info & 0x07;
 610                dev->irq = (bus_info >> 4) & 0x0F;
 611        } else {
 612                /* The DMA channel may be passed in PARAM1. */
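                /* e.g. a boot argument of "ether=0,0x300,5,eth0" (illustrative
                   values) lands 5 in dev->mem_start and so selects ISA DMA
                   channel 5. */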
 613                if (dev->mem_start & 0x07)
 614                        dev->dma = dev->mem_start & 0x07;
 615        }
 616
 617        if (dev->dma == 0) {
 618                /* Read the DMA channel status register, so that we can avoid
 619                   stuck DMA channels in the DMA detection below. */
 620                dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
 621                        (inb(DMA2_STAT_REG) & 0xf0);
 622        }
 623        err = -ENODEV;
 624        if (dev->irq >= 2)
 625                printk(" assigned IRQ %d", dev->irq);
 626        else if (lance_version != 0)  { /* 7990 boards need DMA detection first. */
 627                unsigned long irq_mask;
 628
 629                /* To auto-IRQ we enable the initialization-done and DMA error
 630                   interrupts. For ISA boards we get a DMA error, but VLB and PCI
 631                   boards will work. */
 632                irq_mask = probe_irq_on();
 633
 634                /* Trigger an initialization just for the interrupt. */
 635                outw(0x0041, ioaddr+LANCE_DATA);
 636
 637                mdelay(20);
 638                dev->irq = probe_irq_off(irq_mask);
 639                if (dev->irq)
 640                        printk(", probed IRQ %d", dev->irq);
 641                else {
 642                        printk(", failed to detect IRQ line.\n");
 643                        goto out_tx;
 644                }
 645
 646                /* Check for the initialization done bit, 0x0100, which means
 647                   that we don't need a DMA channel. */
 648                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 649                        dev->dma = 4;
 650        }
 651
 652        if (dev->dma == 4) {
 653                printk(", no DMA needed.\n");
 654        } else if (dev->dma) {
 655                if (request_dma(dev->dma, chipname)) {
 656                        printk("DMA %d allocation failed.\n", dev->dma);
 657                        goto out_tx;
 658                } else
 659                        printk(", assigned DMA %d.\n", dev->dma);
 660        } else {                        /* OK, we have to auto-DMA. */
 661                for (i = 0; i < 4; i++) {
 662                        static const char dmas[] = { 5, 6, 7, 3 };
 663                        int dma = dmas[i];
 664                        int boguscnt;
 665
 666                        /* Don't enable a permanently busy DMA channel, or the machine
 667                           will hang. */
 668                        if (test_bit(dma, &dma_channels))
 669                                continue;
 670                        outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
 671                        if (request_dma(dma, chipname))
 672                                continue;
 673
 674                        flags=claim_dma_lock();
 675                        set_dma_mode(dma, DMA_MODE_CASCADE);
 676                        enable_dma(dma);
 677                        release_dma_lock(flags);
 678
 679                        /* Trigger an initialization. */
 680                        outw(0x0001, ioaddr+LANCE_DATA);
 681                        for (boguscnt = 100; boguscnt > 0; --boguscnt)
 682                                if (inw(ioaddr+LANCE_DATA) & 0x0900)
 683                                        break;
 684                        if (inw(ioaddr+LANCE_DATA) & 0x0100) {
 685                                dev->dma = dma;
 686                                printk(", DMA %d.\n", dev->dma);
 687                                break;
 688                        } else {
 689                                flags=claim_dma_lock();
 690                                disable_dma(dma);
 691                                release_dma_lock(flags);
 692                                free_dma(dma);
 693                        }
 694                }
 695                if (i == 4) {                   /* Failure: bail. */
 696                        printk("DMA detection failed.\n");
 697                        goto out_tx;
 698                }
 699        }
 700
 701        if (lance_version == 0 && dev->irq == 0) {
 702                /* We may auto-IRQ now that we have a DMA channel. */
 703                /* Trigger an initialization just for the interrupt. */
 704                unsigned long irq_mask;
 705
 706                irq_mask = probe_irq_on();
 707                outw(0x0041, ioaddr+LANCE_DATA);
 708
 709                mdelay(40);
 710                dev->irq = probe_irq_off(irq_mask);
 711                if (dev->irq == 0) {
 712                        printk("  Failed to detect the 7990 IRQ line.\n");
 713                        goto out_dma;
 714                }
 715                printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
 716        }
 717
 718        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 719                /* Turn on auto-select of media (10baseT or BNC) so that the user
 720                   can watch the LEDs even if the board isn't opened. */
 721                outw(0x0002, ioaddr+LANCE_ADDR);
 722                /* Don't touch 10base2 power bit. */
 723                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 724        }
 725
 726        if (lance_debug > 0  &&  did_version++ == 0)
 727                printk(version);
 728
 729        /* The LANCE-specific entries in the device structure. */
 730        dev->netdev_ops = &lance_netdev_ops;
 731        dev->watchdog_timeo = TX_TIMEOUT;
 732
 733        err = register_netdev(dev);
 734        if (err)
 735                goto out_dma;
 736        return 0;
 737out_dma:
 738        if (dev->dma != 4)
 739                free_dma(dev->dma);
 740out_tx:
 741        kfree(lp->tx_bounce_buffs);
 742out_rx:
 743        kfree((void*)lp->rx_buffs);
 744out_lp:
 745        kfree(lp);
 746        return err;
 747}
 748
 749
 750static int
 751lance_open(struct net_device *dev)
 752{
 753        struct lance_private *lp = dev->ml_priv;
 754        int ioaddr = dev->base_addr;
 755        int i;
 756
 757        if (dev->irq == 0 ||
 758                request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
 759                return -EAGAIN;
 760        }
 761
 762        /* We used to allocate DMA here, but that was silly.
 763           DMA lines can't be shared!  We now permanently allocate them. */
 764
 765        /* Reset the LANCE */
 766        inw(ioaddr+LANCE_RESET);
 767
 768        /* The DMA controller is used as a no-operation slave, "cascade mode". */
 769        if (dev->dma != 4) {
 770                unsigned long flags=claim_dma_lock();
 771                enable_dma(dev->dma);
 772                set_dma_mode(dev->dma, DMA_MODE_CASCADE);
 773                release_dma_lock(flags);
 774        }
 775
 776        /* Un-Reset the LANCE, needed only for the NE2100. */
 777        if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
 778                outw(0, ioaddr+LANCE_RESET);
 779
 780        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 781                /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
 782                outw(0x0002, ioaddr+LANCE_ADDR);
 783                /* Only touch autoselect bit. */
 784                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 785        }
 786
 787        if (lance_debug > 1)
 788                printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
 789                           dev->name, dev->irq, dev->dma,
 790                           (u32) isa_virt_to_bus(lp->tx_ring),
 791                           (u32) isa_virt_to_bus(lp->rx_ring),
 792                           (u32) isa_virt_to_bus(&lp->init_block));
 793
 794        lance_init_ring(dev, GFP_KERNEL);
 795        /* Re-initialize the LANCE, and start it when done. */
 796        outw(0x0001, ioaddr+LANCE_ADDR);
 797        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 798        outw(0x0002, ioaddr+LANCE_ADDR);
 799        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 800
 801        outw(0x0004, ioaddr+LANCE_ADDR);
 802        outw(0x0915, ioaddr+LANCE_DATA);
 803
 804        outw(0x0000, ioaddr+LANCE_ADDR);
 805        outw(0x0001, ioaddr+LANCE_DATA);
 806
 807        netif_start_queue (dev);
 808
 809        i = 0;
 810        while (i++ < 100)
 811                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 812                        break;
 813        /*
 814         * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
 815         * reports that doing so triggers a bug in the '974.
 816         */
 817        outw(0x0042, ioaddr+LANCE_DATA);
 818
 819        if (lance_debug > 2)
 820                printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
 821                           dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
 822
 823        return 0;                                       /* Always succeed */
 824}
 825
 826/* The LANCE has been halted for one reason or another (busmaster memory
 827   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 828   etc.).  Modern LANCE variants always reload their ring-buffer
 829   configuration when restarted, so we must reinitialize our ring
 830   context before restarting.  As part of this reinitialization,
 831   find all packets still on the Tx ring and pretend that they had been
 832   sent (in effect, drop the packets on the floor) - the higher-level
 833   protocols will time out and retransmit.  It'd be better to shuffle
 834   these skbs to a temp list and then actually re-Tx them after
 835   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
 836*/
 837
 838static void
 839lance_purge_ring(struct net_device *dev)
 840{
 841        struct lance_private *lp = dev->ml_priv;
 842        int i;
 843
 844        /* Free all the skbuffs in the Rx and Tx queues. */
 845        for (i = 0; i < RX_RING_SIZE; i++) {
 846                struct sk_buff *skb = lp->rx_skbuff[i];
 847                lp->rx_skbuff[i] = NULL;
 848                lp->rx_ring[i].base = 0;                /* Not owned by LANCE chip. */
 849                if (skb)
 850                        dev_kfree_skb_any(skb);
 851        }
 852        for (i = 0; i < TX_RING_SIZE; i++) {
 853                if (lp->tx_skbuff[i]) {
 854                        dev_kfree_skb_any(lp->tx_skbuff[i]);
 855                        lp->tx_skbuff[i] = NULL;
 856                }
 857        }
 858}
 859
 860
 861/* Initialize the LANCE Rx and Tx rings. */
 862static void
 863lance_init_ring(struct net_device *dev, gfp_t gfp)
 864{
 865        struct lance_private *lp = dev->ml_priv;
 866        int i;
 867
 868        lp->cur_rx = lp->cur_tx = 0;
 869        lp->dirty_rx = lp->dirty_tx = 0;
 870
 871        for (i = 0; i < RX_RING_SIZE; i++) {
 872                struct sk_buff *skb;
 873                void *rx_buff;
 874
 875                skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
 876                lp->rx_skbuff[i] = skb;
 877                if (skb) {
 878                        skb->dev = dev;
 879                        rx_buff = skb->data;
 880                } else
 881                        rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 882                if (rx_buff == NULL)
 883                        lp->rx_ring[i].base = 0;
 884                else
 885                        lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
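                /* The chip expects the buffer size as a negative (two's
                   complement) 16-bit value, e.g. -PKT_BUF_SZ == -1544 is
                   stored as 0xf9f8. */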
 886                lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 887        }
 888        /* The Tx buffer address is filled in as needed, but we do need to clear
 889           the upper ownership bit. */
 890        for (i = 0; i < TX_RING_SIZE; i++) {
 891                lp->tx_skbuff[i] = NULL;
 892                lp->tx_ring[i].base = 0;
 893        }
 894
 895        lp->init_block.mode = 0x0000;
 896        for (i = 0; i < 6; i++)
 897                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 898        lp->init_block.filter[0] = 0x00000000;
 899        lp->init_block.filter[1] = 0x00000000;
 900        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 901        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 902}
 903
 904static void
 905lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
 906{
 907        struct lance_private *lp = dev->ml_priv;
 908
 909        if (must_reinit ||
 910                (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
 911                lance_purge_ring(dev);
 912                lance_init_ring(dev, GFP_ATOMIC);
 913        }
 914        outw(0x0000,    dev->base_addr + LANCE_ADDR);
 915        outw(csr0_bits, dev->base_addr + LANCE_DATA);
 916}
 917
 918
 919static void lance_tx_timeout (struct net_device *dev)
 920{
 921        struct lance_private *lp = (struct lance_private *) dev->ml_priv;
 922        int ioaddr = dev->base_addr;
 923
 924        outw (0, ioaddr + LANCE_ADDR);
 925        printk ("%s: transmit timed out, status %4.4x, resetting.\n",
 926                dev->name, inw (ioaddr + LANCE_DATA));
 927        outw (0x0004, ioaddr + LANCE_DATA);
 928        lp->stats.tx_errors++;
 929#ifndef final_version
 930        if (lance_debug > 3) {
 931                int i;
 932                printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
 933                  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
 934                        lp->cur_rx);
 935                for (i = 0; i < RX_RING_SIZE; i++)
 936                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 937                         lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
 938                                lp->rx_ring[i].msg_length);
 939                for (i = 0; i < TX_RING_SIZE; i++)
 940                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 941                             lp->tx_ring[i].base, -lp->tx_ring[i].length,
 942                                lp->tx_ring[i].misc);
 943                printk ("\n");
 944        }
 945#endif
 946        lance_restart (dev, 0x0043, 1);
 947
 948        dev->trans_start = jiffies;
 949        netif_wake_queue (dev);
 950}
 951
 952
 953static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 954                                    struct net_device *dev)
 955{
 956        struct lance_private *lp = dev->ml_priv;
 957        int ioaddr = dev->base_addr;
 958        int entry;
 959        unsigned long flags;
 960
 961        spin_lock_irqsave(&lp->devlock, flags);
 962
 963        if (lance_debug > 3) {
 964                outw(0x0000, ioaddr+LANCE_ADDR);
 965                printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
 966                           inw(ioaddr+LANCE_DATA));
 967                outw(0x0000, ioaddr+LANCE_DATA);
 968        }
 969
 970        /* Fill in a Tx ring entry */
 971
 972        /* Mask to ring buffer boundary. */
 973        entry = lp->cur_tx & TX_RING_MOD_MASK;
 974
 975        /* Caution: the write order is important here, set the base address
 976           with the "ownership" bits last. */
 977
  978        /* The old LANCE chips don't automatically pad buffers to the minimum size. */
 979        if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
 980                if (skb->len < ETH_ZLEN) {
 981                        if (skb_padto(skb, ETH_ZLEN))
 982                                goto out;
 983                        lp->tx_ring[entry].length = -ETH_ZLEN;
 984                }
 985                else
 986                        lp->tx_ring[entry].length = -skb->len;
 987        } else
 988                lp->tx_ring[entry].length = -skb->len;
 989
 990        lp->tx_ring[entry].misc = 0x0000;
 991
 992        lp->stats.tx_bytes += skb->len;
 993
 994        /* If any part of this buffer is >16M we must copy it to a low-memory
 995           buffer. */
 996        if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
 997                if (lance_debug > 5)
 998                        printk("%s: bouncing a high-memory packet (%#x).\n",
 999                                   dev->name, (u32)isa_virt_to_bus(skb->data));
1000                skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
1001                lp->tx_ring[entry].base =
1002                        ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1003                dev_kfree_skb(skb);
1004        } else {
1005                lp->tx_skbuff[entry] = skb;
1006                lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1007        }
1008        lp->cur_tx++;
1009
1010        /* Trigger an immediate send poll. */
1011        outw(0x0000, ioaddr+LANCE_ADDR);
1012        outw(0x0048, ioaddr+LANCE_DATA);
1013
1014        dev->trans_start = jiffies;
1015
1016        if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1017                netif_stop_queue(dev);
1018
1019out:
1020        spin_unlock_irqrestore(&lp->devlock, flags);
1021        return NETDEV_TX_OK;
1022}
1023
1024/* The LANCE interrupt handler. */
1025static irqreturn_t lance_interrupt(int irq, void *dev_id)
1026{
1027        struct net_device *dev = dev_id;
1028        struct lance_private *lp;
1029        int csr0, ioaddr, boguscnt=10;
1030        int must_restart;
1031
1032        ioaddr = dev->base_addr;
1033        lp = dev->ml_priv;
1034
1035        spin_lock (&lp->devlock);
1036
1037        outw(0x00, dev->base_addr + LANCE_ADDR);
1038        while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
1039                   && --boguscnt >= 0) {
1040                /* Acknowledge all of the current interrupt sources ASAP. */
1041                outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1042
1043                must_restart = 0;
1044
1045                if (lance_debug > 5)
1046                        printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1047                                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1048
1049                if (csr0 & 0x0400)                      /* Rx interrupt */
1050                        lance_rx(dev);
1051
1052                if (csr0 & 0x0200) {            /* Tx-done interrupt */
1053                        int dirty_tx = lp->dirty_tx;
1054
1055                        while (dirty_tx < lp->cur_tx) {
1056                                int entry = dirty_tx & TX_RING_MOD_MASK;
1057                                int status = lp->tx_ring[entry].base;
1058
1059                                if (status < 0)
1060                                        break;                  /* It still hasn't been Txed */
1061
1062                                lp->tx_ring[entry].base = 0;
1063
1064                                if (status & 0x40000000) {
 1065                                        /* There was a major error, log it. */
1066                                        int err_status = lp->tx_ring[entry].misc;
1067                                        lp->stats.tx_errors++;
1068                                        if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
1069                                        if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
1070                                        if (err_status & 0x1000) lp->stats.tx_window_errors++;
1071                                        if (err_status & 0x4000) {
1072                                                /* Ackk!  On FIFO errors the Tx unit is turned off! */
1073                                                lp->stats.tx_fifo_errors++;
1074                                                /* Remove this verbosity later! */
1075                                                printk("%s: Tx FIFO error! Status %4.4x.\n",
1076                                                           dev->name, csr0);
1077                                                /* Restart the chip. */
1078                                                must_restart = 1;
1079                                        }
1080                                } else {
1081                                        if (status & 0x18000000)
1082                                                lp->stats.collisions++;
1083                                        lp->stats.tx_packets++;
1084                                }
1085
1086                                /* We must free the original skb if it's not a data-only copy
1087                                   in the bounce buffer. */
1088                                if (lp->tx_skbuff[entry]) {
1089                                        dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1090                                        lp->tx_skbuff[entry] = NULL;
1091                                }
1092                                dirty_tx++;
1093                        }
1094
1095#ifndef final_version
1096                        if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1097                                printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1098                                           dirty_tx, lp->cur_tx,
1099                                           netif_queue_stopped(dev) ? "yes" : "no");
1100                                dirty_tx += TX_RING_SIZE;
1101                        }
1102#endif
1103
1104                        /* if the ring is no longer full, accept more packets */
1105                        if (netif_queue_stopped(dev) &&
1106                            dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1107                                netif_wake_queue (dev);
1108
1109                        lp->dirty_tx = dirty_tx;
1110                }
1111
1112                /* Log misc errors. */
1113                if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
1114                if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
1115                if (csr0 & 0x0800) {
1116                        printk("%s: Bus master arbitration failure, status %4.4x.\n",
1117                                   dev->name, csr0);
1118                        /* Restart the chip. */
1119                        must_restart = 1;
1120                }
1121
1122                if (must_restart) {
1123                        /* stop the chip to clear the error condition, then restart */
1124                        outw(0x0000, dev->base_addr + LANCE_ADDR);
1125                        outw(0x0004, dev->base_addr + LANCE_DATA);
1126                        lance_restart(dev, 0x0002, 0);
1127                }
1128        }
1129
1130        /* Clear any other interrupt, and set interrupt enable. */
1131        outw(0x0000, dev->base_addr + LANCE_ADDR);
1132        outw(0x7940, dev->base_addr + LANCE_DATA);
1133
1134        if (lance_debug > 4)
1135                printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1136                           dev->name, inw(ioaddr + LANCE_ADDR),
1137                           inw(dev->base_addr + LANCE_DATA));
1138
1139        spin_unlock (&lp->devlock);
1140        return IRQ_HANDLED;
1141}
1142
1143static int
1144lance_rx(struct net_device *dev)
1145{
1146        struct lance_private *lp = dev->ml_priv;
1147        int entry = lp->cur_rx & RX_RING_MOD_MASK;
1148        int i;
1149
1150        /* If we own the next entry, it's a new packet. Send it up. */
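        /* (Ownership is bit 31 of 'base'; a non-negative signed value means
           the descriptor has been handed back to the host.) */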
1151        while (lp->rx_ring[entry].base >= 0) {
1152                int status = lp->rx_ring[entry].base >> 24;
1153
1154                if (status != 0x03) {                   /* There was an error. */
1155                        /* There is a tricky error noted by John Murphy,
1156                           <murf@perftech.com> to Russ Nelson: Even with full-sized
1157                           buffers it's possible for a jabber packet to use two
1158                           buffers, with only the last correctly noting the error. */
1159                        if (status & 0x01)      /* Only count a general error at the */
1160                                lp->stats.rx_errors++; /* end of a packet.*/
1161                        if (status & 0x20) lp->stats.rx_frame_errors++;
1162                        if (status & 0x10) lp->stats.rx_over_errors++;
1163                        if (status & 0x08) lp->stats.rx_crc_errors++;
1164                        if (status & 0x04) lp->stats.rx_fifo_errors++;
1165                        lp->rx_ring[entry].base &= 0x03ffffff;
1166                }
1167                else
1168                {
1169                        /* Malloc up new buffer, compatible with net3. */
1170                        short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1171                        struct sk_buff *skb;
1172
1173                        if(pkt_len<60)
1174                        {
1175                                printk("%s: Runt packet!\n",dev->name);
1176                                lp->stats.rx_errors++;
1177                        }
1178                        else
1179                        {
1180                                skb = dev_alloc_skb(pkt_len+2);
1181                                if (skb == NULL)
1182                                {
1183                                        printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1184                                        for (i=0; i < RX_RING_SIZE; i++)
1185                                                if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1186                                                        break;
1187
1188                                        if (i > RX_RING_SIZE -2)
1189                                        {
1190                                                lp->stats.rx_dropped++;
1191                                                lp->rx_ring[entry].base |= 0x80000000;
1192                                                lp->cur_rx++;
1193                                        }
1194                                        break;
1195                                }
1196                                skb_reserve(skb,2);     /* 16 byte align */
1197                                skb_put(skb,pkt_len);   /* Make room */
1198                                skb_copy_to_linear_data(skb,
1199                                        (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1200                                        pkt_len);
1201                                skb->protocol=eth_type_trans(skb,dev);
1202                                netif_rx(skb);
1203                                lp->stats.rx_packets++;
1204                                lp->stats.rx_bytes+=pkt_len;
1205                        }
1206                }
1207                /* The docs say that the buffer length isn't touched, but Andrew Boyd
1208                   of QNX reports that some revs of the 79C965 clear it. */
1209                lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1210                lp->rx_ring[entry].base |= 0x80000000;
1211                entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1212        }
1213
1214        /* We should check that at least two ring entries are free.      If not,
1215           we should free one and mark stats->rx_dropped++. */
1216
1217        return 0;
1218}
1219
1220static int
1221lance_close(struct net_device *dev)
1222{
1223        int ioaddr = dev->base_addr;
1224        struct lance_private *lp = dev->ml_priv;
1225
1226        netif_stop_queue (dev);
1227
1228        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1229                outw(112, ioaddr+LANCE_ADDR);
1230                lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1231        }
1232        outw(0, ioaddr+LANCE_ADDR);
1233
1234        if (lance_debug > 1)
1235                printk("%s: Shutting down ethercard, status was %2.2x.\n",
1236                           dev->name, inw(ioaddr+LANCE_DATA));
1237
1238        /* We stop the LANCE here -- it occasionally polls
1239           memory if we don't. */
1240        outw(0x0004, ioaddr+LANCE_DATA);
1241
1242        if (dev->dma != 4)
1243        {
1244                unsigned long flags=claim_dma_lock();
1245                disable_dma(dev->dma);
1246                release_dma_lock(flags);
1247        }
1248        free_irq(dev->irq, dev);
1249
1250        lance_purge_ring(dev);
1251
1252        return 0;
1253}
1254
1255static struct net_device_stats *lance_get_stats(struct net_device *dev)
1256{
1257        struct lance_private *lp = dev->ml_priv;
1258
1259        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1260                short ioaddr = dev->base_addr;
1261                short saved_addr;
1262                unsigned long flags;
1263
1264                spin_lock_irqsave(&lp->devlock, flags);
1265                saved_addr = inw(ioaddr+LANCE_ADDR);
1266                outw(112, ioaddr+LANCE_ADDR);
1267                lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1268                outw(saved_addr, ioaddr+LANCE_ADDR);
1269                spin_unlock_irqrestore(&lp->devlock, flags);
1270        }
1271
1272        return &lp->stats;
1273}
1274
1275/* Set or clear the multicast filter for this adaptor.
1276 */
1277
1278static void set_multicast_list(struct net_device *dev)
1279{
1280        short ioaddr = dev->base_addr;
1281
1282        outw(0, ioaddr+LANCE_ADDR);
1283        outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
1284
1285        if (dev->flags&IFF_PROMISC) {
1286                outw(15, ioaddr+LANCE_ADDR);
1287                outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1288        } else {
1289                short multicast_table[4];
1290                int i;
1291                int num_addrs=dev->mc_count;
1292                if(dev->flags&IFF_ALLMULTI)
1293                        num_addrs=1;
1294                /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1295                memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
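                /* CSRs 8-11 hold the 64-bit logical-address (multicast) hash
                   filter; all-ones accepts every multicast frame, all-zeroes
                   rejects them. */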
1296                for (i = 0; i < 4; i++) {
1297                        outw(8 + i, ioaddr+LANCE_ADDR);
1298                        outw(multicast_table[i], ioaddr+LANCE_DATA);
1299                }
1300                outw(15, ioaddr+LANCE_ADDR);
1301                outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1302        }
1303
1304        lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1305
1306}
1307
1308