linux/drivers/net/ethernet/amd/lance.c
   1/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
   2/*
   3        Written/copyright 1993-1998 by Donald Becker.
   4
   5        Copyright 1993 United States Government as represented by the
   6        Director, National Security Agency.
   7        This software may be used and distributed according to the terms
   8        of the GNU General Public License, incorporated herein by reference.
   9
  10        This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11        with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
  12
  13        The author may be reached as becker@scyld.com, or C/O
  14        Scyld Computing Corporation
  15        410 Severn Ave., Suite 210
  16        Annapolis MD 21403
  17
  18        Andrey V. Savochkin:
  19        - alignment problem with 1.3.* kernel and some minor changes.
  20        Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
  21        - added support for Linux/Alpha, but removed most of it, because
  22        it worked only for the PCI chip.
  23      - added hook for the 32bit lance driver
  24      - added PCnetPCI II (79C970A) to chip table
  25        Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
  26        - hopefully fix above so Linux/Alpha can use ISA cards too.
  27    8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
  28    v1.12 10/27/97 Module support -djb
  29    v1.14  2/3/98 Module support modified, made PCI support optional -djb
  30    v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
  31                  before unregister_netdev() which caused NULL pointer
  32                  reference later in the chain (in rtnetlink_fill_ifinfo())
  33                  -- Mika Kuoppala <miku@iki.fi>
  34
  35    Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
  36    the 2.1 version of the old driver - Alan Cox
  37
  38    Get rid of check_region, check kmalloc return in lance_probe1
  39    Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
  40
  41        Reworked detection, added support for Racal InterLan EtherBlaster cards
  42        Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
  43*/
  44
  45static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
  46
  47#include <linux/module.h>
  48#include <linux/kernel.h>
  49#include <linux/string.h>
  50#include <linux/delay.h>
  51#include <linux/errno.h>
  52#include <linux/ioport.h>
  53#include <linux/slab.h>
  54#include <linux/interrupt.h>
  55#include <linux/pci.h>
  56#include <linux/init.h>
  57#include <linux/netdevice.h>
  58#include <linux/etherdevice.h>
  59#include <linux/skbuff.h>
  60#include <linux/mm.h>
  61#include <linux/bitops.h>
  62
  63#include <asm/io.h>
  64#include <asm/dma.h>
  65
  66static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
  67static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
  68static int __init do_lance_probe(struct net_device *dev);
  69
  70
  71static struct card {
  72        char id_offset14;
  73        char id_offset15;
  74} cards[] = {
  75        {       //"normal"
  76                .id_offset14 = 0x57,
  77                .id_offset15 = 0x57,
  78        },
  79        {       //NI6510EB
  80                .id_offset14 = 0x52,
  81                .id_offset15 = 0x44,
  82        },
  83        {       //Racal InterLan EtherBlaster
  84                .id_offset14 = 0x52,
  85                .id_offset15 = 0x49,
  86        },
  87};
  88#define NUM_CARDS 3
  89
  90#ifdef LANCE_DEBUG
  91static int lance_debug = LANCE_DEBUG;
  92#else
  93static int lance_debug = 1;
  94#endif
  95
  96/*
  97                                Theory of Operation
  98
  99I. Board Compatibility
 100
 101This device driver is designed for the AMD 79C960, the "PCnet-ISA
 102single-chip ethernet controller for ISA".  This chip is used in a wide
 103variety of boards from vendors such as Allied Telesis, HP, Kingston,
 104and Boca.  This driver is also intended to work with older AMD 7990
 105designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
 106I use the name LANCE to refer to all of the AMD chips, even though it properly
 107refers only to the original 7990.
 108
 109II. Board-specific settings
 110
  111The driver is designed to work with boards that use the faster
  112bus-master mode, rather than the shared memory mode.  (Only older designs
  113have on-board buffer memory needed to support the slower shared memory mode.)
 114
 115Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
 116channel.  This driver probes the likely base addresses:
 117{0x300, 0x320, 0x340, 0x360}.
 118After the board is found it generates a DMA-timeout interrupt and uses
 119autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
 120of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
 121probed for by enabling each free DMA channel in turn and checking if
 122initialization succeeds.
 123
 124The HP-J2405A board is an exception: with this board it is easy to read the
 125EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
 126_know_ the base address -- that field is for writing the EEPROM.)
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
 132the base and length of the data buffer, along with status bits.  The length
 133of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
 134the buffer length (rather than being directly the buffer length) for
  135implementation ease.  The current default values are 4 (Tx) and 4 (Rx), which
  136leads to ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
 137needlessly uses extra space and reduces the chance that an upper layer will
 138be able to reorder queued Tx packets based on priority.  Decreasing the number
 139of entries makes it more difficult to achieve back-to-back packet transmission
  140and increases the chance that the Rx ring will overflow.  (Consider the worst case
 141of receiving back-to-back minimum-sized packets.)
 142
 143The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
 144statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
 145avoid the administrative overhead. For the Rx side this avoids dynamically
 146allocating full-sized buffers "just in case", at the expense of a
 147memory-to-memory data copy for each packet received.  For most systems this
 148is a good tradeoff: the Rx buffer will always be in low memory, the copy
 149is inexpensive, and it primes the cache for later packet processing.  For Tx
 150the buffers are only used when needed as low-memory bounce buffers.
 151
 152IIIB. 16M memory limitations.
 153For the ISA bus master mode all structures used directly by the LANCE,
 154the initialization block, Rx and Tx rings, and data buffers, must be
 155accessible from the ISA bus, i.e. in the lower 16M of real memory.
 156This is a problem for current Linux kernels on >16M machines. The network
 157devices are initialized after memory initialization, and the kernel doles out
 158memory from the top of memory downward.  The current solution is to have a
 159special network initialization routine that's called before memory
 160initialization; this will eventually be generalized for all network devices.
 161As mentioned before, low-memory "bounce-buffers" are used when needed.
 162
 163IIIC. Synchronization
 164The driver runs as two independent, single-threaded flows of control.  One
 165is the send-packet routine, which enforces single-threaded use by the
 166dev->tbusy flag.  The other thread is the interrupt handler, which is single
 167threaded by the hardware and other software.
 168
 169The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 170flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  171queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 172the 'lp->tx_full' flag.
 173
 174The interrupt handler has exclusive control over the Rx ring and records stats
 175from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
 176we can't avoid the interrupt overhead by having the Tx routine reap the Tx
 177stats.)  After reaping the stats, it marks the queue entry as empty by setting
 178the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
 179tx_full and tbusy flags.
 180
 181*/
 182
 183/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
 184   Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
  185   That translates to 4 and 4 (16 == 2^4).
 186   This is a compile-time option for efficiency.
 187   */
 188#ifndef LANCE_LOG_TX_BUFFERS
 189#define LANCE_LOG_TX_BUFFERS 4
 190#define LANCE_LOG_RX_BUFFERS 4
 191#endif
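/* Note that both symbols must be overridden together: the #ifndef above guards
   only LANCE_LOG_TX_BUFFERS, so supplying one without the other at build time
   leaves LANCE_LOG_RX_BUFFERS undefined.  A build-time sketch (illustrative,
   not a tested configuration):

       CFLAGS_lance.o += -DLANCE_LOG_TX_BUFFERS=5 -DLANCE_LOG_RX_BUFFERS=5

   would give 32-entry Tx and Rx rings, at the cost of more low (ISA DMA-able)
   memory for buffers. */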
 192
 193#define TX_RING_SIZE                    (1 << (LANCE_LOG_TX_BUFFERS))
 194#define TX_RING_MOD_MASK                (TX_RING_SIZE - 1)
 195#define TX_RING_LEN_BITS                ((LANCE_LOG_TX_BUFFERS) << 29)
 196
 197#define RX_RING_SIZE                    (1 << (LANCE_LOG_RX_BUFFERS))
 198#define RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 199#define RX_RING_LEN_BITS                ((LANCE_LOG_RX_BUFFERS) << 29)
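/* The *_RING_LEN_BITS values drop straight into the ring pointer words of the
   init block below: the log2 ring length goes in the top three bits (29..31)
   of that 32-bit word, alongside the 24-bit ring base address.  A worked
   example with the default LANCE_LOG_RX_BUFFERS of 4:

       RX_RING_LEN_BITS            = 4 << 29 = 0x80000000
       lp->init_block.rx_ring      = (ring_bus_addr & 0xffffff) | 0x80000000

   which is exactly what lance_probe1() and lance_init_ring() construct. */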
 200
 201#define PKT_BUF_SZ              1544
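/* 1544 bytes covers a maximal Ethernet frame (1500-byte payload + 14-byte
   header + 4-byte FCS = 1518 bytes) with some slack, matching the "slightly
   oversized" description in the Theory of Operation above. */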
 202
 203/* Offsets from base I/O address. */
 204#define LANCE_DATA 0x10
 205#define LANCE_ADDR 0x12
 206#define LANCE_RESET 0x14
 207#define LANCE_BUS_IF 0x16
 208#define LANCE_TOTAL_SIZE 0x18
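/* All chip registers are reached through an index/data pair: write the CSR
   number to LANCE_ADDR (the register address port) and then read or write
   LANCE_DATA (the register data port), e.g.

       outw(0, ioaddr + LANCE_ADDR);      /- select CSR0 -/
       csr0 = inw(ioaddr + LANCE_DATA);   /- read it -/

   The CSR0 bits this driver tests or sets are, per the AMD databook:
   0x0001 INIT, 0x0002 STRT, 0x0004 STOP, 0x0008 TDMD, 0x0040 INEA,
   0x0100 IDON, 0x0200 TINT, 0x0400 RINT, 0x0800 MERR, 0x1000 MISS,
   0x4000 BABL.  Combinations such as 0x0043 (INIT|STRT|INEA) or 0x0048
   (TDMD|INEA) used below follow from this table. */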
 209
 210#define TX_TIMEOUT      (HZ/5)
 211
 212/* The LANCE Rx and Tx ring descriptors. */
 213struct lance_rx_head {
 214        s32 base;
 215        s16 buf_length;                 /* This length is 2s complement (negative)! */
 216        s16 msg_length;                 /* This length is "normal". */
 217};
 218
 219struct lance_tx_head {
 220        s32 base;
 221        s16 length;                             /* Length is 2s complement (negative)! */
 222        s16 misc;
 223};
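/* Both descriptor length fields are written as negative (two's-complement)
   byte counts, matching the hardware convention noted above.  For example, a
   padded minimum-length frame queued by lance_start_xmit() on an old LANCE gets

       lp->tx_ring[entry].length = -ETH_ZLEN;   (-60 == 0xFFC4 as a 16-bit value)

   and the Rx buffers are posted with buf_length = -PKT_BUF_SZ. */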
 224
 225/* The LANCE initialization block, described in databook. */
 226struct lance_init_block {
 227        u16 mode;               /* Pre-set mode (reg. 15) */
 228        u8  phys_addr[6]; /* Physical ethernet address */
 229        u32 filter[2];                  /* Multicast filter (unused). */
 230        /* Receive and transmit ring base, along with extra bits. */
 231        u32  rx_ring;                   /* Tx and Rx ring base pointers */
 232        u32  tx_ring;
 233};
 234
 235struct lance_private {
 236        /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
 237        struct lance_rx_head rx_ring[RX_RING_SIZE];
 238        struct lance_tx_head tx_ring[TX_RING_SIZE];
 239        struct lance_init_block init_block;
 240        const char *name;
 241        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
 242        struct sk_buff* tx_skbuff[TX_RING_SIZE];
 243        /* The addresses of receive-in-place skbuffs. */
 244        struct sk_buff* rx_skbuff[RX_RING_SIZE];
 245        unsigned long rx_buffs;         /* Address of Rx and Tx buffers. */
 246        /* Tx low-memory "bounce buffer" address. */
 247        char (*tx_bounce_buffs)[PKT_BUF_SZ];
 248        int cur_rx, cur_tx;                     /* The next free ring entry */
 249        int dirty_rx, dirty_tx;         /* The ring entries to be free()ed. */
 250        int dma;
 251        unsigned char chip_version;     /* See lance_chip_type. */
 252        spinlock_t devlock;
 253};
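/* Keeping the rings and init_block at the front of lance_private matters:
   lance_probe1() allocates the whole structure with GFP_DMA, so every field
   the chip reaches by bus mastering lands in ISA-reachable (<16MB) memory,
   and the leading position keeps the descriptors on the (at least) 8-byte
   alignment that kmalloc() provides on the platforms this ISA driver runs on. */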
 254
 255#define LANCE_MUST_PAD          0x00000001
 256#define LANCE_ENABLE_AUTOSELECT 0x00000002
 257#define LANCE_MUST_REINIT_RING  0x00000004
 258#define LANCE_MUST_UNRESET      0x00000008
 259#define LANCE_HAS_MISSED_FRAME  0x00000010
 260
 261/* A mapping from the chip ID number to the part number and features.
 262   These are from the datasheets -- in real life the '970 version
 263   reportedly has the same ID as the '965. */
 264static struct lance_chip_type {
 265        int id_number;
 266        const char *name;
 267        int flags;
 268} chip_table[] = {
 269        {0x0000, "LANCE 7990",                          /* Ancient lance chip.  */
 270                LANCE_MUST_PAD + LANCE_MUST_UNRESET},
 271        {0x0003, "PCnet/ISA 79C960",            /* 79C960 PCnet/ISA.  */
 272                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 273                        LANCE_HAS_MISSED_FRAME},
 274        {0x2260, "PCnet/ISA+ 79C961",           /* 79C961 PCnet/ISA+, Plug-n-Play.  */
 275                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 276                        LANCE_HAS_MISSED_FRAME},
 277        {0x2420, "PCnet/PCI 79C970",            /* 79C970 or 79C974 PCnet-SCSI, PCI. */
 278                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 279                        LANCE_HAS_MISSED_FRAME},
 280        /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
 281                it the PCnet32. */
 282        {0x2430, "PCnet32",                                     /* 79C965 PCnet for VL bus. */
 283                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 284                        LANCE_HAS_MISSED_FRAME},
  285        {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCnet-PCI II. */
 286                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 287                        LANCE_HAS_MISSED_FRAME},
 288        {0x0,    "PCnet (unknown)",
 289                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 290                        LANCE_HAS_MISSED_FRAME},
 291};
 292
 293enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
 294
 295
 296/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
 297   Assume yes until we know the memory size. */
 298static unsigned char lance_need_isa_bounce_buffers = 1;
 299
 300static int lance_open(struct net_device *dev);
 301static void lance_init_ring(struct net_device *dev, gfp_t mode);
 302static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 303                                    struct net_device *dev);
 304static int lance_rx(struct net_device *dev);
 305static irqreturn_t lance_interrupt(int irq, void *dev_id);
 306static int lance_close(struct net_device *dev);
 307static struct net_device_stats *lance_get_stats(struct net_device *dev);
 308static void set_multicast_list(struct net_device *dev);
 309static void lance_tx_timeout (struct net_device *dev);
 310
 311
 312
 313#ifdef MODULE
 314#define MAX_CARDS               8       /* Max number of interfaces (cards) per module */
 315
 316static struct net_device *dev_lance[MAX_CARDS];
 317static int io[MAX_CARDS];
 318static int dma[MAX_CARDS];
 319static int irq[MAX_CARDS];
 320
 321module_param_hw_array(io, int, ioport, NULL, 0);
 322module_param_hw_array(dma, int, dma, NULL, 0);
 323module_param_hw_array(irq, int, irq, NULL, 0);
 324module_param(lance_debug, int, 0);
  325MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
 326MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
 327MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
 328MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
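
/* Example module loads (illustrative values; use whatever the card is
   jumpered or EEPROM-configured to):

       modprobe lance io=0x300 irq=5 dma=5
       modprobe lance io=0x300,0x320 irq=5,9 dma=5,6    (two cards)

   io= is mandatory (see init_module() below); irq= and dma= may be omitted
   for boards whose settings the probe can work out on its own. */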
 329
 330int __init init_module(void)
 331{
 332        struct net_device *dev;
 333        int this_dev, found = 0;
 334
 335        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 336                if (io[this_dev] == 0)  {
 337                        if (this_dev != 0) /* only complain once */
 338                                break;
 339                        printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
 340                        return -EPERM;
 341                }
 342                dev = alloc_etherdev(0);
 343                if (!dev)
 344                        break;
 345                dev->irq = irq[this_dev];
 346                dev->base_addr = io[this_dev];
 347                dev->dma = dma[this_dev];
 348                if (do_lance_probe(dev) == 0) {
 349                        dev_lance[found++] = dev;
 350                        continue;
 351                }
 352                free_netdev(dev);
 353                break;
 354        }
 355        if (found != 0)
 356                return 0;
 357        return -ENXIO;
 358}
 359
 360static void cleanup_card(struct net_device *dev)
 361{
 362        struct lance_private *lp = dev->ml_priv;
 363        if (dev->dma != 4)
 364                free_dma(dev->dma);
 365        release_region(dev->base_addr, LANCE_TOTAL_SIZE);
 366        kfree(lp->tx_bounce_buffs);
 367        kfree((void*)lp->rx_buffs);
 368        kfree(lp);
 369}
 370
 371void __exit cleanup_module(void)
 372{
 373        int this_dev;
 374
 375        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 376                struct net_device *dev = dev_lance[this_dev];
 377                if (dev) {
 378                        unregister_netdev(dev);
 379                        cleanup_card(dev);
 380                        free_netdev(dev);
 381                }
 382        }
 383}
 384#endif /* MODULE */
 385MODULE_LICENSE("GPL");
 386
 387
  388/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
  389   board probes, now that kmalloc() can allocate ISA DMA-able regions.
 390   This also allows the LANCE driver to be used as a module.
 391   */
 392static int __init do_lance_probe(struct net_device *dev)
 393{
 394        unsigned int *port;
 395        int result;
 396
 397        if (high_memory <= phys_to_virt(16*1024*1024))
 398                lance_need_isa_bounce_buffers = 0;
 399
 400        for (port = lance_portlist; *port; port++) {
 401                int ioaddr = *port;
 402                struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
 403                                                        "lance-probe");
 404
 405                if (r) {
 406                        /* Detect the card with minimal I/O reads */
 407                        char offset14 = inb(ioaddr + 14);
 408                        int card;
 409                        for (card = 0; card < NUM_CARDS; ++card)
 410                                if (cards[card].id_offset14 == offset14)
 411                                        break;
 412                        if (card < NUM_CARDS) {/*yes, the first byte matches*/
 413                                char offset15 = inb(ioaddr + 15);
 414                                for (card = 0; card < NUM_CARDS; ++card)
 415                                        if ((cards[card].id_offset14 == offset14) &&
 416                                                (cards[card].id_offset15 == offset15))
 417                                                break;
 418                        }
 419                        if (card < NUM_CARDS) { /*Signature OK*/
 420                                result = lance_probe1(dev, ioaddr, 0, 0);
 421                                if (!result) {
 422                                        struct lance_private *lp = dev->ml_priv;
 423                                        int ver = lp->chip_version;
 424
 425                                        r->name = chip_table[ver].name;
 426                                        return 0;
 427                                }
 428                        }
 429                        release_region(ioaddr, LANCE_TOTAL_SIZE);
 430                }
 431        }
 432        return -ENODEV;
 433}
 434
 435#ifndef MODULE
 436struct net_device * __init lance_probe(int unit)
 437{
 438        struct net_device *dev = alloc_etherdev(0);
 439        int err;
 440
 441        if (!dev)
 442                return ERR_PTR(-ENODEV);
 443
 444        sprintf(dev->name, "eth%d", unit);
 445        netdev_boot_setup_check(dev);
 446
 447        err = do_lance_probe(dev);
 448        if (err)
 449                goto out;
 450        return dev;
 451out:
 452        free_netdev(dev);
 453        return ERR_PTR(err);
 454}
 455#endif
 456
 457static const struct net_device_ops lance_netdev_ops = {
 458        .ndo_open               = lance_open,
 459        .ndo_start_xmit         = lance_start_xmit,
 460        .ndo_stop               = lance_close,
 461        .ndo_get_stats          = lance_get_stats,
 462        .ndo_set_rx_mode        = set_multicast_list,
 463        .ndo_tx_timeout         = lance_tx_timeout,
 464        .ndo_set_mac_address    = eth_mac_addr,
 465        .ndo_validate_addr      = eth_validate_addr,
 466};
 467
 468static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
 469{
 470        struct lance_private *lp;
 471        unsigned long dma_channels;     /* Mark spuriously-busy DMA channels */
 472        int i, reset_val, lance_version;
 473        const char *chipname;
 474        /* Flags for specific chips or boards. */
 475        unsigned char hpJ2405A = 0;     /* HP ISA adaptor */
 476        int hp_builtin = 0;             /* HP on-board ethernet. */
 477        static int did_version;         /* Already printed version info. */
 478        unsigned long flags;
 479        int err = -ENOMEM;
 480        void __iomem *bios;
 481
 482        /* First we look for special cases.
 483           Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
 484           There are two HP versions, check the BIOS for the configuration port.
 485           This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
 486           */
 487        bios = ioremap(0xf00f0, 0x14);
 488        if (!bios)
 489                return -ENOMEM;
 490        if (readw(bios + 0x12) == 0x5048)  {
 491                static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
 492                int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
 493                /* We can have boards other than the built-in!  Verify this is on-board. */
 494                if ((inb(hp_port) & 0xc0) == 0x80 &&
 495                    ioaddr_table[inb(hp_port) & 3] == ioaddr)
 496                        hp_builtin = hp_port;
 497        }
 498        iounmap(bios);
 499        /* We also recognize the HP Vectra on-board here, but check below. */
 500        hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
 501                    inb(ioaddr+2) == 0x09);
 502
 503        /* Reset the LANCE.      */
 504        reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
 505
  506        /* The Un-Reset is needed only for the real NE2100, and will
 507           confuse the HP board. */
 508        if (!hpJ2405A)
 509                outw(reset_val, ioaddr+LANCE_RESET);
 510
 511        outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
 512        if (inw(ioaddr+LANCE_DATA) != 0x0004)
 513                return -ENODEV;
 514
 515        /* Get the version of the chip. */
 516        outw(88, ioaddr+LANCE_ADDR);
 517        if (inw(ioaddr+LANCE_ADDR) != 88) {
 518                lance_version = 0;
 519        } else {                        /* Good, it's a newer chip. */
 520                int chip_version = inw(ioaddr+LANCE_DATA);
 521                outw(89, ioaddr+LANCE_ADDR);
 522                chip_version |= inw(ioaddr+LANCE_DATA) << 16;
 523                if (lance_debug > 2)
 524                        printk("  LANCE chip version is %#x.\n", chip_version);
 525                if ((chip_version & 0xfff) != 0x003)
 526                        return -ENODEV;
 527                chip_version = (chip_version >> 12) & 0xffff;
 528                for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
 529                        if (chip_table[lance_version].id_number == chip_version)
 530                                break;
 531                }
 532        }
 533
 534        /* We can't allocate private data from alloc_etherdev() because it must
  535           be in an ISA DMA-able region. */
 536        chipname = chip_table[lance_version].name;
 537        printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
 538
 539        /* There is a 16 byte station address PROM at the base address.
 540           The first six bytes are the station address. */
 541        for (i = 0; i < 6; i++)
 542                dev->dev_addr[i] = inb(ioaddr + i);
 543        printk("%pM", dev->dev_addr);
 544
 545        dev->base_addr = ioaddr;
 546        /* Make certain the data structures used by the LANCE are aligned and DMAble. */
 547
 548        lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
 549        if (!lp)
 550                return -ENOMEM;
 551        if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
 552        dev->ml_priv = lp;
 553        lp->name = chipname;
 554        lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
 555                                                    GFP_DMA | GFP_KERNEL);
 556        if (!lp->rx_buffs)
 557                goto out_lp;
 558        if (lance_need_isa_bounce_buffers) {
 559                lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
 560                                                    GFP_DMA | GFP_KERNEL);
 561                if (!lp->tx_bounce_buffs)
 562                        goto out_rx;
 563        } else
 564                lp->tx_bounce_buffs = NULL;
 565
 566        lp->chip_version = lance_version;
 567        spin_lock_init(&lp->devlock);
 568
 569        lp->init_block.mode = 0x0003;           /* Disable Rx and Tx. */
 570        for (i = 0; i < 6; i++)
 571                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 572        lp->init_block.filter[0] = 0x00000000;
 573        lp->init_block.filter[1] = 0x00000000;
 574        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 575        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 576
 577        outw(0x0001, ioaddr+LANCE_ADDR);
 578        inw(ioaddr+LANCE_ADDR);
 579        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 580        outw(0x0002, ioaddr+LANCE_ADDR);
 581        inw(ioaddr+LANCE_ADDR);
 582        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 583        outw(0x0000, ioaddr+LANCE_ADDR);
 584        inw(ioaddr+LANCE_ADDR);
 585
 586        if (irq) {                                      /* Set iff PCI card. */
 587                dev->dma = 4;                   /* Native bus-master, no DMA channel needed. */
 588                dev->irq = irq;
 589        } else if (hp_builtin) {
 590                static const char dma_tbl[4] = {3, 5, 6, 0};
 591                static const char irq_tbl[4] = {3, 4, 5, 9};
 592                unsigned char port_val = inb(hp_builtin);
 593                dev->dma = dma_tbl[(port_val >> 4) & 3];
 594                dev->irq = irq_tbl[(port_val >> 2) & 3];
 595                printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
 596        } else if (hpJ2405A) {
 597                static const char dma_tbl[4] = {3, 5, 6, 7};
 598                static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
 599                short reset_val = inw(ioaddr+LANCE_RESET);
 600                dev->dma = dma_tbl[(reset_val >> 2) & 3];
 601                dev->irq = irq_tbl[(reset_val >> 4) & 7];
 602                printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
 603        } else if (lance_version == PCNET_ISAP) {               /* The plug-n-play version. */
 604                short bus_info;
 605                outw(8, ioaddr+LANCE_ADDR);
 606                bus_info = inw(ioaddr+LANCE_BUS_IF);
 607                dev->dma = bus_info & 0x07;
 608                dev->irq = (bus_info >> 4) & 0x0F;
 609        } else {
 610                /* The DMA channel may be passed in PARAM1. */
 611                if (dev->mem_start & 0x07)
 612                        dev->dma = dev->mem_start & 0x07;
 613        }
 614
 615        if (dev->dma == 0) {
 616                /* Read the DMA channel status register, so that we can avoid
 617                   stuck DMA channels in the DMA detection below. */
 618                dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
 619                        (inb(DMA2_STAT_REG) & 0xf0);
 620        }
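        /* dma_channels now has one bit per 8237 channel: bits 0-3 come from
           the request lines of the first controller, bits 4-7 from the second.
           A set bit means that channel already has a request pending, so the
           auto-DMA loop below skips it rather than risk hanging the machine. */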
 621        err = -ENODEV;
 622        if (dev->irq >= 2)
 623                printk(" assigned IRQ %d", dev->irq);
 624        else if (lance_version != 0)  { /* 7990 boards need DMA detection first. */
 625                unsigned long irq_mask;
 626
 627                /* To auto-IRQ we enable the initialization-done and DMA error
 628                   interrupts. For ISA boards we get a DMA error, but VLB and PCI
 629                   boards will work. */
 630                irq_mask = probe_irq_on();
 631
 632                /* Trigger an initialization just for the interrupt. */
 633                outw(0x0041, ioaddr+LANCE_DATA);
 634
 635                mdelay(20);
 636                dev->irq = probe_irq_off(irq_mask);
 637                if (dev->irq)
 638                        printk(", probed IRQ %d", dev->irq);
 639                else {
 640                        printk(", failed to detect IRQ line.\n");
 641                        goto out_tx;
 642                }
 643
 644                /* Check for the initialization done bit, 0x0100, which means
 645                   that we don't need a DMA channel. */
 646                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 647                        dev->dma = 4;
 648        }
 649
 650        if (dev->dma == 4) {
 651                printk(", no DMA needed.\n");
 652        } else if (dev->dma) {
 653                if (request_dma(dev->dma, chipname)) {
 654                        printk("DMA %d allocation failed.\n", dev->dma);
 655                        goto out_tx;
 656                } else
 657                        printk(", assigned DMA %d.\n", dev->dma);
 658        } else {                        /* OK, we have to auto-DMA. */
 659                for (i = 0; i < 4; i++) {
 660                        static const char dmas[] = { 5, 6, 7, 3 };
 661                        int dma = dmas[i];
 662                        int boguscnt;
 663
 664                        /* Don't enable a permanently busy DMA channel, or the machine
 665                           will hang. */
 666                        if (test_bit(dma, &dma_channels))
 667                                continue;
 668                        outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
 669                        if (request_dma(dma, chipname))
 670                                continue;
 671
 672                        flags=claim_dma_lock();
 673                        set_dma_mode(dma, DMA_MODE_CASCADE);
 674                        enable_dma(dma);
 675                        release_dma_lock(flags);
 676
 677                        /* Trigger an initialization. */
 678                        outw(0x0001, ioaddr+LANCE_DATA);
 679                        for (boguscnt = 100; boguscnt > 0; --boguscnt)
 680                                if (inw(ioaddr+LANCE_DATA) & 0x0900)
 681                                        break;
 682                        if (inw(ioaddr+LANCE_DATA) & 0x0100) {
 683                                dev->dma = dma;
 684                                printk(", DMA %d.\n", dev->dma);
 685                                break;
 686                        } else {
 687                                flags=claim_dma_lock();
 688                                disable_dma(dma);
 689                                release_dma_lock(flags);
 690                                free_dma(dma);
 691                        }
 692                }
 693                if (i == 4) {                   /* Failure: bail. */
 694                        printk("DMA detection failed.\n");
 695                        goto out_tx;
 696                }
 697        }
 698
 699        if (lance_version == 0 && dev->irq == 0) {
 700                /* We may auto-IRQ now that we have a DMA channel. */
 701                /* Trigger an initialization just for the interrupt. */
 702                unsigned long irq_mask;
 703
 704                irq_mask = probe_irq_on();
 705                outw(0x0041, ioaddr+LANCE_DATA);
 706
 707                mdelay(40);
 708                dev->irq = probe_irq_off(irq_mask);
 709                if (dev->irq == 0) {
 710                        printk("  Failed to detect the 7990 IRQ line.\n");
 711                        goto out_dma;
 712                }
 713                printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
 714        }
 715
 716        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 717                /* Turn on auto-select of media (10baseT or BNC) so that the user
 718                   can watch the LEDs even if the board isn't opened. */
 719                outw(0x0002, ioaddr+LANCE_ADDR);
 720                /* Don't touch 10base2 power bit. */
 721                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 722        }
 723
 724        if (lance_debug > 0  &&  did_version++ == 0)
 725                printk(version);
 726
 727        /* The LANCE-specific entries in the device structure. */
 728        dev->netdev_ops = &lance_netdev_ops;
 729        dev->watchdog_timeo = TX_TIMEOUT;
 730
 731        err = register_netdev(dev);
 732        if (err)
 733                goto out_dma;
 734        return 0;
 735out_dma:
 736        if (dev->dma != 4)
 737                free_dma(dev->dma);
 738out_tx:
 739        kfree(lp->tx_bounce_buffs);
 740out_rx:
 741        kfree((void*)lp->rx_buffs);
 742out_lp:
 743        kfree(lp);
 744        return err;
 745}
 746
 747
 748static int
 749lance_open(struct net_device *dev)
 750{
 751        struct lance_private *lp = dev->ml_priv;
 752        int ioaddr = dev->base_addr;
 753        int i;
 754
 755        if (dev->irq == 0 ||
 756                request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
 757                return -EAGAIN;
 758        }
 759
 760        /* We used to allocate DMA here, but that was silly.
 761           DMA lines can't be shared!  We now permanently allocate them. */
 762
 763        /* Reset the LANCE */
 764        inw(ioaddr+LANCE_RESET);
 765
 766        /* The DMA controller is used as a no-operation slave, "cascade mode". */
 767        if (dev->dma != 4) {
 768                unsigned long flags=claim_dma_lock();
 769                enable_dma(dev->dma);
 770                set_dma_mode(dev->dma, DMA_MODE_CASCADE);
 771                release_dma_lock(flags);
 772        }
 773
 774        /* Un-Reset the LANCE, needed only for the NE2100. */
 775        if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
 776                outw(0, ioaddr+LANCE_RESET);
 777
 778        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 779                /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
 780                outw(0x0002, ioaddr+LANCE_ADDR);
 781                /* Only touch autoselect bit. */
 782                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 783        }
 784
 785        if (lance_debug > 1)
 786                printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
 787                           dev->name, dev->irq, dev->dma,
 788                           (u32) isa_virt_to_bus(lp->tx_ring),
 789                           (u32) isa_virt_to_bus(lp->rx_ring),
 790                           (u32) isa_virt_to_bus(&lp->init_block));
 791
 792        lance_init_ring(dev, GFP_KERNEL);
 793        /* Re-initialize the LANCE, and start it when done. */
 794        outw(0x0001, ioaddr+LANCE_ADDR);
 795        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 796        outw(0x0002, ioaddr+LANCE_ADDR);
 797        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 798
 799        outw(0x0004, ioaddr+LANCE_ADDR);
 800        outw(0x0915, ioaddr+LANCE_DATA);
 801
 802        outw(0x0000, ioaddr+LANCE_ADDR);
 803        outw(0x0001, ioaddr+LANCE_DATA);
 804
 805        netif_start_queue (dev);
 806
 807        i = 0;
 808        while (i++ < 100)
 809                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 810                        break;
 811        /*
 812         * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
 813         * reports that doing so triggers a bug in the '974.
 814         */
 815        outw(0x0042, ioaddr+LANCE_DATA);
 816
 817        if (lance_debug > 2)
 818                printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
 819                           dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
 820
 821        return 0;                                       /* Always succeed */
 822}
 823
 824/* The LANCE has been halted for one reason or another (busmaster memory
 825   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 826   etc.).  Modern LANCE variants always reload their ring-buffer
 827   configuration when restarted, so we must reinitialize our ring
 828   context before restarting.  As part of this reinitialization,
 829   find all packets still on the Tx ring and pretend that they had been
 830   sent (in effect, drop the packets on the floor) - the higher-level
 831   protocols will time out and retransmit.  It'd be better to shuffle
 832   these skbs to a temp list and then actually re-Tx them after
 833   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
 834*/
 835
 836static void
 837lance_purge_ring(struct net_device *dev)
 838{
 839        struct lance_private *lp = dev->ml_priv;
 840        int i;
 841
 842        /* Free all the skbuffs in the Rx and Tx queues. */
 843        for (i = 0; i < RX_RING_SIZE; i++) {
 844                struct sk_buff *skb = lp->rx_skbuff[i];
 845                lp->rx_skbuff[i] = NULL;
 846                lp->rx_ring[i].base = 0;                /* Not owned by LANCE chip. */
 847                if (skb)
 848                        dev_kfree_skb_any(skb);
 849        }
 850        for (i = 0; i < TX_RING_SIZE; i++) {
 851                if (lp->tx_skbuff[i]) {
 852                        dev_kfree_skb_any(lp->tx_skbuff[i]);
 853                        lp->tx_skbuff[i] = NULL;
 854                }
 855        }
 856}
 857
 858
 859/* Initialize the LANCE Rx and Tx rings. */
 860static void
 861lance_init_ring(struct net_device *dev, gfp_t gfp)
 862{
 863        struct lance_private *lp = dev->ml_priv;
 864        int i;
 865
 866        lp->cur_rx = lp->cur_tx = 0;
 867        lp->dirty_rx = lp->dirty_tx = 0;
 868
 869        for (i = 0; i < RX_RING_SIZE; i++) {
 870                struct sk_buff *skb;
 871                void *rx_buff;
 872
 873                skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
 874                lp->rx_skbuff[i] = skb;
 875                if (skb)
 876                        rx_buff = skb->data;
 877                else
 878                        rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 879                if (rx_buff == NULL)
 880                        lp->rx_ring[i].base = 0;
 881                else
 882                        lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
 883                lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 884        }
 885        /* The Tx buffer address is filled in as needed, but we do need to clear
 886           the upper ownership bit. */
 887        for (i = 0; i < TX_RING_SIZE; i++) {
 888                lp->tx_skbuff[i] = NULL;
 889                lp->tx_ring[i].base = 0;
 890        }
 891
 892        lp->init_block.mode = 0x0000;
 893        for (i = 0; i < 6; i++)
 894                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 895        lp->init_block.filter[0] = 0x00000000;
 896        lp->init_block.filter[1] = 0x00000000;
 897        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 898        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 899}
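/* A note on the constants used above: in the packed 32-bit 'base' word the
   top byte carries the descriptor status, so OR-ing in 0x80000000 sets the
   OWN bit and hands the buffer to the LANCE, while a base of 0 (OWN clear)
   leaves the entry owned by the host and untouched by the chip. */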
 900
 901static void
 902lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
 903{
 904        struct lance_private *lp = dev->ml_priv;
 905
 906        if (must_reinit ||
 907                (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
 908                lance_purge_ring(dev);
 909                lance_init_ring(dev, GFP_ATOMIC);
 910        }
 911        outw(0x0000,    dev->base_addr + LANCE_ADDR);
 912        outw(csr0_bits, dev->base_addr + LANCE_DATA);
 913}
 914
 915
 916static void lance_tx_timeout (struct net_device *dev)
 917{
 918        struct lance_private *lp = (struct lance_private *) dev->ml_priv;
 919        int ioaddr = dev->base_addr;
 920
 921        outw (0, ioaddr + LANCE_ADDR);
 922        printk ("%s: transmit timed out, status %4.4x, resetting.\n",
 923                dev->name, inw (ioaddr + LANCE_DATA));
 924        outw (0x0004, ioaddr + LANCE_DATA);
 925        dev->stats.tx_errors++;
 926#ifndef final_version
 927        if (lance_debug > 3) {
 928                int i;
 929                printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
 930                  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
 931                        lp->cur_rx);
 932                for (i = 0; i < RX_RING_SIZE; i++)
 933                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 934                         lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
 935                                lp->rx_ring[i].msg_length);
 936                for (i = 0; i < TX_RING_SIZE; i++)
 937                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 938                             lp->tx_ring[i].base, -lp->tx_ring[i].length,
 939                                lp->tx_ring[i].misc);
 940                printk ("\n");
 941        }
 942#endif
 943        lance_restart (dev, 0x0043, 1);
 944
 945        netif_trans_update(dev); /* prevent tx timeout */
 946        netif_wake_queue (dev);
 947}
 948
 949
 950static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 951                                    struct net_device *dev)
 952{
 953        struct lance_private *lp = dev->ml_priv;
 954        int ioaddr = dev->base_addr;
 955        int entry;
 956        unsigned long flags;
 957
 958        spin_lock_irqsave(&lp->devlock, flags);
 959
 960        if (lance_debug > 3) {
 961                outw(0x0000, ioaddr+LANCE_ADDR);
 962                printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
 963                           inw(ioaddr+LANCE_DATA));
 964                outw(0x0000, ioaddr+LANCE_DATA);
 965        }
 966
 967        /* Fill in a Tx ring entry */
 968
 969        /* Mask to ring buffer boundary. */
 970        entry = lp->cur_tx & TX_RING_MOD_MASK;
 971
 972        /* Caution: the write order is important here, set the base address
 973           with the "ownership" bits last. */
 974
  975        /* The old LANCE chips don't automatically pad buffers to the minimum size. */
 976        if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
 977                if (skb->len < ETH_ZLEN) {
 978                        if (skb_padto(skb, ETH_ZLEN))
 979                                goto out;
 980                        lp->tx_ring[entry].length = -ETH_ZLEN;
 981                }
 982                else
 983                        lp->tx_ring[entry].length = -skb->len;
 984        } else
 985                lp->tx_ring[entry].length = -skb->len;
 986
 987        lp->tx_ring[entry].misc = 0x0000;
 988
 989        dev->stats.tx_bytes += skb->len;
 990
 991        /* If any part of this buffer is >16M we must copy it to a low-memory
 992           buffer. */
 993        if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
 994                if (lance_debug > 5)
 995                        printk("%s: bouncing a high-memory packet (%#x).\n",
 996                                   dev->name, (u32)isa_virt_to_bus(skb->data));
 997                skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
 998                lp->tx_ring[entry].base =
 999                        ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1000                dev_kfree_skb(skb);
1001        } else {
1002                lp->tx_skbuff[entry] = skb;
1003                lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1004        }
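        /* In both branches above the top byte 0x83 sets OWN (0x80) plus STP and
           ENP (0x03): a complete, single-buffer frame handed to the chip, with
           the ISA bus address of the data in the low 24 bits. */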
1005        lp->cur_tx++;
1006
1007        /* Trigger an immediate send poll. */
1008        outw(0x0000, ioaddr+LANCE_ADDR);
1009        outw(0x0048, ioaddr+LANCE_DATA);
1010
1011        if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1012                netif_stop_queue(dev);
1013
1014out:
1015        spin_unlock_irqrestore(&lp->devlock, flags);
1016        return NETDEV_TX_OK;
1017}
1018
1019/* The LANCE interrupt handler. */
1020static irqreturn_t lance_interrupt(int irq, void *dev_id)
1021{
1022        struct net_device *dev = dev_id;
1023        struct lance_private *lp;
1024        int csr0, ioaddr, boguscnt=10;
1025        int must_restart;
1026
1027        ioaddr = dev->base_addr;
1028        lp = dev->ml_priv;
1029
1030        spin_lock (&lp->devlock);
1031
1032        outw(0x00, dev->base_addr + LANCE_ADDR);
1033        while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1034               --boguscnt >= 0) {
1035                /* Acknowledge all of the current interrupt sources ASAP. */
1036                outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1037
1038                must_restart = 0;
1039
1040                if (lance_debug > 5)
1041                        printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1042                                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1043
1044                if (csr0 & 0x0400)                      /* Rx interrupt */
1045                        lance_rx(dev);
1046
1047                if (csr0 & 0x0200) {            /* Tx-done interrupt */
1048                        int dirty_tx = lp->dirty_tx;
1049
1050                        while (dirty_tx < lp->cur_tx) {
1051                                int entry = dirty_tx & TX_RING_MOD_MASK;
1052                                int status = lp->tx_ring[entry].base;
1053
1054                                if (status < 0)
1055                                        break;                  /* It still hasn't been Txed */
1056
1057                                lp->tx_ring[entry].base = 0;
1058
1059                                if (status & 0x40000000) {
 1060                                        /* There was a major error, log it. */
1061                                        int err_status = lp->tx_ring[entry].misc;
1062                                        dev->stats.tx_errors++;
1063                                        if (err_status & 0x0400)
1064                                                dev->stats.tx_aborted_errors++;
1065                                        if (err_status & 0x0800)
1066                                                dev->stats.tx_carrier_errors++;
1067                                        if (err_status & 0x1000)
1068                                                dev->stats.tx_window_errors++;
1069                                        if (err_status & 0x4000) {
1070                                                /* Ackk!  On FIFO errors the Tx unit is turned off! */
1071                                                dev->stats.tx_fifo_errors++;
1072                                                /* Remove this verbosity later! */
1073                                                printk("%s: Tx FIFO error! Status %4.4x.\n",
1074                                                           dev->name, csr0);
1075                                                /* Restart the chip. */
1076                                                must_restart = 1;
1077                                        }
1078                                } else {
1079                                        if (status & 0x18000000)
1080                                                dev->stats.collisions++;
1081                                        dev->stats.tx_packets++;
1082                                }
1083
1084                                /* We must free the original skb if it's not a data-only copy
1085                                   in the bounce buffer. */
1086                                if (lp->tx_skbuff[entry]) {
1087                                        dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1088                                        lp->tx_skbuff[entry] = NULL;
1089                                }
1090                                dirty_tx++;
1091                        }
1092
1093#ifndef final_version
1094                        if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1095                                printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1096                                           dirty_tx, lp->cur_tx,
1097                                           netif_queue_stopped(dev) ? "yes" : "no");
1098                                dirty_tx += TX_RING_SIZE;
1099                        }
1100#endif
1101
1102                        /* if the ring is no longer full, accept more packets */
1103                        if (netif_queue_stopped(dev) &&
1104                            dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1105                                netif_wake_queue (dev);
1106
1107                        lp->dirty_tx = dirty_tx;
1108                }
1109
1110                /* Log misc errors. */
1111                if (csr0 & 0x4000)
1112                        dev->stats.tx_errors++; /* Tx babble. */
1113                if (csr0 & 0x1000)
1114                        dev->stats.rx_errors++; /* Missed a Rx frame. */
1115                if (csr0 & 0x0800) {
1116                        printk("%s: Bus master arbitration failure, status %4.4x.\n",
1117                                   dev->name, csr0);
1118                        /* Restart the chip. */
1119                        must_restart = 1;
1120                }
1121
1122                if (must_restart) {
1123                        /* stop the chip to clear the error condition, then restart */
1124                        outw(0x0000, dev->base_addr + LANCE_ADDR);
1125                        outw(0x0004, dev->base_addr + LANCE_DATA);
1126                        lance_restart(dev, 0x0002, 0);
1127                }
1128        }
1129
1130        /* Clear any other interrupt, and set interrupt enable. */
1131        outw(0x0000, dev->base_addr + LANCE_ADDR);
1132        outw(0x7940, dev->base_addr + LANCE_DATA);
1133
1134        if (lance_debug > 4)
1135                printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1136                           dev->name, inw(ioaddr + LANCE_ADDR),
1137                           inw(dev->base_addr + LANCE_DATA));
1138
1139        spin_unlock (&lp->devlock);
1140        return IRQ_HANDLED;
1141}
1142
1143static int
1144lance_rx(struct net_device *dev)
1145{
1146        struct lance_private *lp = dev->ml_priv;
1147        int entry = lp->cur_rx & RX_RING_MOD_MASK;
1148        int i;
1149
1150        /* If we own the next entry, it's a new packet. Send it up. */
1151        while (lp->rx_ring[entry].base >= 0) {
1152                int status = lp->rx_ring[entry].base >> 24;
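
                /* 'status' is the top byte of the descriptor: 0x80 OWN, 0x40 ERR,
                   0x20 FRAM, 0x10 OFLO, 0x08 CRC, 0x04 BUFF, 0x02 STP, 0x01 ENP.
                   0x03 (STP|ENP with no error bits) is a good frame that fits in
                   one buffer, which is all this driver ever posts. */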
1153
1154                if (status != 0x03) {                   /* There was an error. */
1155                        /* There is a tricky error noted by John Murphy,
1156                           <murf@perftech.com> to Russ Nelson: Even with full-sized
1157                           buffers it's possible for a jabber packet to use two
1158                           buffers, with only the last correctly noting the error. */
1159                        if (status & 0x01)      /* Only count a general error at the */
1160                                dev->stats.rx_errors++; /* end of a packet.*/
1161                        if (status & 0x20)
1162                                dev->stats.rx_frame_errors++;
1163                        if (status & 0x10)
1164                                dev->stats.rx_over_errors++;
1165                        if (status & 0x08)
1166                                dev->stats.rx_crc_errors++;
1167                        if (status & 0x04)
1168                                dev->stats.rx_fifo_errors++;
1169                        lp->rx_ring[entry].base &= 0x03ffffff;
1170                }
1171                else
1172                {
1173                        /* Malloc up new buffer, compatible with net3. */
1174                        short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1175                        struct sk_buff *skb;
1176
1177                        if(pkt_len<60)
1178                        {
1179                                printk("%s: Runt packet!\n",dev->name);
1180                                dev->stats.rx_errors++;
1181                        }
1182                        else
1183                        {
1184                                skb = dev_alloc_skb(pkt_len+2);
1185                                if (skb == NULL)
1186                                {
1187                                        printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1188                                        for (i=0; i < RX_RING_SIZE; i++)
1189                                                if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1190                                                        break;
1191
1192                                        if (i > RX_RING_SIZE -2)
1193                                        {
1194                                                dev->stats.rx_dropped++;
1195                                                lp->rx_ring[entry].base |= 0x80000000;
1196                                                lp->cur_rx++;
1197                                        }
1198                                        break;
1199                                }
1200                                skb_reserve(skb,2);     /* 16 byte align */
1201                                skb_put(skb,pkt_len);   /* Make room */
1202                                skb_copy_to_linear_data(skb,
1203                                        (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1204                                        pkt_len);
1205                                skb->protocol=eth_type_trans(skb,dev);
1206                                netif_rx(skb);
1207                                dev->stats.rx_packets++;
1208                                dev->stats.rx_bytes += pkt_len;
1209                        }
1210                }
1211                /* The docs say that the buffer length isn't touched, but Andrew Boyd
1212                   of QNX reports that some revs of the 79C965 clear it. */
1213                lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1214                lp->rx_ring[entry].base |= 0x80000000;
1215                entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1216        }
1217
1218        /* We should check that at least two ring entries are free.      If not,
1219           we should free one and mark stats->rx_dropped++. */
1220
1221        return 0;
1222}
1223
1224static int
1225lance_close(struct net_device *dev)
1226{
1227        int ioaddr = dev->base_addr;
1228        struct lance_private *lp = dev->ml_priv;
1229
1230        netif_stop_queue (dev);
1231
1232        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1233                outw(112, ioaddr+LANCE_ADDR);
1234                dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1235        }
1236        outw(0, ioaddr+LANCE_ADDR);
1237
1238        if (lance_debug > 1)
1239                printk("%s: Shutting down ethercard, status was %2.2x.\n",
1240                           dev->name, inw(ioaddr+LANCE_DATA));
1241
1242        /* We stop the LANCE here -- it occasionally polls
1243           memory if we don't. */
1244        outw(0x0004, ioaddr+LANCE_DATA);
1245
1246        if (dev->dma != 4)
1247        {
1248                unsigned long flags=claim_dma_lock();
1249                disable_dma(dev->dma);
1250                release_dma_lock(flags);
1251        }
1252        free_irq(dev->irq, dev);
1253
1254        lance_purge_ring(dev);
1255
1256        return 0;
1257}
1258
1259static struct net_device_stats *lance_get_stats(struct net_device *dev)
1260{
1261        struct lance_private *lp = dev->ml_priv;
1262
1263        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1264                short ioaddr = dev->base_addr;
1265                short saved_addr;
1266                unsigned long flags;
1267
1268                spin_lock_irqsave(&lp->devlock, flags);
1269                saved_addr = inw(ioaddr+LANCE_ADDR);
1270                outw(112, ioaddr+LANCE_ADDR);
1271                dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1272                outw(saved_addr, ioaddr+LANCE_ADDR);
1273                spin_unlock_irqrestore(&lp->devlock, flags);
1274        }
1275
1276        return &dev->stats;
1277}
1278
1279/* Set or clear the multicast filter for this adaptor.
1280 */
1281
1282static void set_multicast_list(struct net_device *dev)
1283{
1284        short ioaddr = dev->base_addr;
1285
1286        outw(0, ioaddr+LANCE_ADDR);
1287        outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
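        /* CSR15 is the mode register (bit 15 = promiscuous); CSR8-11 hold the
           64-bit logical-address (multicast hash) filter that the loop below
           fills with all-ones or all-zeroes. */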
1288
1289        if (dev->flags&IFF_PROMISC) {
1290                outw(15, ioaddr+LANCE_ADDR);
1291                outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1292        } else {
1293                short multicast_table[4];
1294                int i;
1295                int num_addrs=netdev_mc_count(dev);
1296                if(dev->flags&IFF_ALLMULTI)
1297                        num_addrs=1;
1298                /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1299                memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1300                for (i = 0; i < 4; i++) {
1301                        outw(8 + i, ioaddr+LANCE_ADDR);
1302                        outw(multicast_table[i], ioaddr+LANCE_DATA);
1303                }
1304                outw(15, ioaddr+LANCE_ADDR);
1305                outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1306        }
1307
1308        lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1309
1310}
1311
1312