linux/drivers/net/ethernet/amd/lance.c
   1/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
   2/*
   3        Written/copyright 1993-1998 by Donald Becker.
   4
   5        Copyright 1993 United States Government as represented by the
   6        Director, National Security Agency.
   7        This software may be used and distributed according to the terms
   8        of the GNU General Public License, incorporated herein by reference.
   9
  10        This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11        with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
  12
  13        The author may be reached as becker@scyld.com, or C/O
  14        Scyld Computing Corporation
  15        410 Severn Ave., Suite 210
  16        Annapolis MD 21403
  17
  18        Andrey V. Savochkin:
  19        - alignment problem with 1.3.* kernel and some minor changes.
  20        Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
  21        - added support for Linux/Alpha, but removed most of it, because
  22        it worked only for the PCI chip.
  23      - added hook for the 32bit lance driver
  24      - added PCnetPCI II (79C970A) to chip table
  25        Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
  26        - hopefully fix above so Linux/Alpha can use ISA cards too.
  27    8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
  28    v1.12 10/27/97 Module support -djb
  29    v1.14  2/3/98 Module support modified, made PCI support optional -djb
  30    v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
  31                  before unregister_netdev() which caused NULL pointer
  32                  reference later in the chain (in rtnetlink_fill_ifinfo())
  33                  -- Mika Kuoppala <miku@iki.fi>
  34
  35    Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
  36    the 2.1 version of the old driver - Alan Cox
  37
  38    Get rid of check_region, check kmalloc return in lance_probe1
  39    Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
  40
  41        Reworked detection, added support for Racal InterLan EtherBlaster cards
  42        Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
  43*/
  44
  45static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
  46
  47#include <linux/module.h>
  48#include <linux/kernel.h>
  49#include <linux/string.h>
  50#include <linux/delay.h>
  51#include <linux/errno.h>
  52#include <linux/ioport.h>
  53#include <linux/slab.h>
  54#include <linux/interrupt.h>
  55#include <linux/pci.h>
  56#include <linux/init.h>
  57#include <linux/netdevice.h>
  58#include <linux/etherdevice.h>
  59#include <linux/skbuff.h>
  60#include <linux/mm.h>
  61#include <linux/bitops.h>
  62
  63#include <asm/io.h>
  64#include <asm/dma.h>
  65
  66static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
  67static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
  68static int __init do_lance_probe(struct net_device *dev);
  69
  70
  71static struct card {
  72        char id_offset14;
  73        char id_offset15;
  74} cards[] = {
  75        {       //"normal"
  76                .id_offset14 = 0x57,
  77                .id_offset15 = 0x57,
  78        },
  79        {       //NI6510EB
  80                .id_offset14 = 0x52,
  81                .id_offset15 = 0x44,
  82        },
  83        {       //Racal InterLan EtherBlaster
  84                .id_offset14 = 0x52,
  85                .id_offset15 = 0x49,
  86        },
  87};
  88#define NUM_CARDS 3
  89
  90#ifdef LANCE_DEBUG
  91static int lance_debug = LANCE_DEBUG;
  92#else
  93static int lance_debug = 1;
  94#endif
  95
  96/*
  97                                Theory of Operation
  98
  99I. Board Compatibility
 100
 101This device driver is designed for the AMD 79C960, the "PCnet-ISA
 102single-chip ethernet controller for ISA".  This chip is used in a wide
 103variety of boards from vendors such as Allied Telesis, HP, Kingston,
 104and Boca.  This driver is also intended to work with older AMD 7990
 105designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
 106I use the name LANCE to refer to all of the AMD chips, even though it properly
 107refers only to the original 7990.
 108
 109II. Board-specific settings
 110
  111The driver is designed to work with boards that use the faster
  112bus-master mode, rather than the shared memory mode.  (Only older designs
  113have the on-board buffer memory needed to support the slower shared memory mode.)
 114
 115Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
 116channel.  This driver probes the likely base addresses:
 117{0x300, 0x320, 0x340, 0x360}.
 118After the board is found it generates a DMA-timeout interrupt and uses
 119autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
 120of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
 121probed for by enabling each free DMA channel in turn and checking if
 122initialization succeeds.
 123
 124The HP-J2405A board is an exception: with this board it is easy to read the
 125EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
 126_know_ the base address -- that field is for writing the EEPROM.)
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
 132the base and length of the data buffer, along with status bits.  The length
 133of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
 134the buffer length (rather than being directly the buffer length) for
  135implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads to
  136ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
  137needlessly uses extra space and reduces the chance that an upper layer will
  138be able to reorder queued Tx packets based on priority.  Decreasing the number
  139of entries makes it more difficult to achieve back-to-back packet transmission
  140and increases the chance that the Rx ring will overflow.  (Consider the worst case
 141of receiving back-to-back minimum-sized packets.)
 142
 143The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
 144statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
 145avoid the administrative overhead. For the Rx side this avoids dynamically
 146allocating full-sized buffers "just in case", at the expense of a
 147memory-to-memory data copy for each packet received.  For most systems this
 148is a good tradeoff: the Rx buffer will always be in low memory, the copy
 149is inexpensive, and it primes the cache for later packet processing.  For Tx
 150the buffers are only used when needed as low-memory bounce buffers.
 151
 152IIIB. 16M memory limitations.
 153For the ISA bus master mode all structures used directly by the LANCE,
 154the initialization block, Rx and Tx rings, and data buffers, must be
 155accessible from the ISA bus, i.e. in the lower 16M of real memory.
  156On machines with more than 16M this driver therefore allocates everything the
  157LANCE must reach -- the private structure with its rings and initialization
  158block, the Rx buffers, and the Tx bounce buffers -- with kmalloc(GFP_DMA),
  159which returns ISA DMA-able memory.  As mentioned before, the low-memory
  160"bounce buffers" are used when Tx data happens to lie above 16M; received
  161data is always copied out of the low-memory Rx buffers.
 162
  163IIIC. Synchronization
  164The driver runs as two independent, single-threaded flows of control.  One
  165is the send-packet routine, lance_start_xmit(), which the networking core
  166serializes and which takes lp->devlock.  The other thread is the interrupt
  167handler, which is single threaded by the hardware and other software.
  168
  169The send-packet thread has partial control over the Tx ring.  It queues the
  170packet in the next free ring entry and, when the ring becomes full, stops
  171the transmit queue with netif_stop_queue().
  172
  173The interrupt handler has exclusive control over the Rx ring and records stats
  174from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
  175we can't avoid the interrupt overhead by having the Tx routine reap the Tx
  176stats.)  After reaping the stats, it marks the ring entry as empty by setting
  177the 'base' to zero.  When the queue was stopped and enough entries have been
  178reaped, it restarts the queue with netif_wake_queue().
 180
 181*/
 182
 183/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
 184   Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
 185   That translates to 4 and 4 (16 == 2^^4).
 186   This is a compile-time option for efficiency.
 187   */
 188#ifndef LANCE_LOG_TX_BUFFERS
 189#define LANCE_LOG_TX_BUFFERS 4
 190#define LANCE_LOG_RX_BUFFERS 4
 191#endif
 192
 193#define TX_RING_SIZE                    (1 << (LANCE_LOG_TX_BUFFERS))
 194#define TX_RING_MOD_MASK                (TX_RING_SIZE - 1)
 195#define TX_RING_LEN_BITS                ((LANCE_LOG_TX_BUFFERS) << 29)
 196
 197#define RX_RING_SIZE                    (1 << (LANCE_LOG_RX_BUFFERS))
 198#define RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 199#define RX_RING_LEN_BITS                ((LANCE_LOG_RX_BUFFERS) << 29)
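/* Note: in the LANCE initialization block the upper three bits of each 32-bit
   ring pointer encode log2() of the ring length (per the AMD LANCE/PCnet
   datasheets), which is why the log values above are shifted up by 29 before
   being OR-ed into init_block.rx_ring/tx_ring below. */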
 200
 201#define PKT_BUF_SZ              1544
 202
 203/* Offsets from base I/O address. */
 204#define LANCE_DATA 0x10
 205#define LANCE_ADDR 0x12
 206#define LANCE_RESET 0x14
 207#define LANCE_BUS_IF 0x16
 208#define LANCE_TOTAL_SIZE 0x18
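/* The LANCE uses an indirect register scheme: write the CSR number to the
   register address port (LANCE_ADDR, the "RAP") and then read or write the
   selected CSR through the data port (LANCE_DATA, the "RDP").  A CSR read is
   therefore roughly:

        outw(csr_no, ioaddr + LANCE_ADDR);
        csr_val = inw(ioaddr + LANCE_DATA);

   which is the access pattern used throughout this driver. */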
 209
 210#define TX_TIMEOUT      (HZ/5)
 211
 212/* The LANCE Rx and Tx ring descriptors. */
 213struct lance_rx_head {
 214        s32 base;
 215        s16 buf_length;                 /* This length is 2s complement (negative)! */
 216        s16 msg_length;                 /* This length is "normal". */
 217};
 218
 219struct lance_tx_head {
 220        s32 base;
 221        s16 length;                             /* Length is 2s complement (negative)! */
 222        s16 misc;
 223};
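/* In both descriptors the 32-bit 'base' field holds the 24-bit bus address of
   the data buffer in its low three bytes and the status/control flags in its
   top byte: OWN in bit 31, error bits below it, and STP/ENP (start/end of
   packet) in bits 25/24 (see the AMD datasheets).  Hence the 0xffffff address
   masks and constants such as 0x80000000 (OWN) and 0x83000000 (OWN|STP|ENP)
   used later in this file. */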
 224
 225/* The LANCE initialization block, described in databook. */
 226struct lance_init_block {
 227        u16 mode;               /* Pre-set mode (reg. 15) */
 228        u8  phys_addr[6]; /* Physical ethernet address */
 229        u32 filter[2];                  /* Multicast filter (unused). */
 230        /* Receive and transmit ring base, along with extra bits. */
 231        u32  rx_ring;                   /* Tx and Rx ring base pointers */
 232        u32  tx_ring;
 233};
 234
 235struct lance_private {
 236        /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
 237        struct lance_rx_head rx_ring[RX_RING_SIZE];
 238        struct lance_tx_head tx_ring[TX_RING_SIZE];
 239        struct lance_init_block init_block;
 240        const char *name;
 241        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
 242        struct sk_buff* tx_skbuff[TX_RING_SIZE];
 243        /* The addresses of receive-in-place skbuffs. */
 244        struct sk_buff* rx_skbuff[RX_RING_SIZE];
 245        unsigned long rx_buffs;         /* Address of Rx and Tx buffers. */
 246        /* Tx low-memory "bounce buffer" address. */
 247        char (*tx_bounce_buffs)[PKT_BUF_SZ];
 248        int cur_rx, cur_tx;                     /* The next free ring entry */
 249        int dirty_rx, dirty_tx;         /* The ring entries to be free()ed. */
 250        int dma;
 251        unsigned char chip_version;     /* See lance_chip_type. */
 252        spinlock_t devlock;
 253};
 254
 255#define LANCE_MUST_PAD          0x00000001
 256#define LANCE_ENABLE_AUTOSELECT 0x00000002
 257#define LANCE_MUST_REINIT_RING  0x00000004
 258#define LANCE_MUST_UNRESET      0x00000008
 259#define LANCE_HAS_MISSED_FRAME  0x00000010
 260
 261/* A mapping from the chip ID number to the part number and features.
 262   These are from the datasheets -- in real life the '970 version
 263   reportedly has the same ID as the '965. */
 264static struct lance_chip_type {
 265        int id_number;
 266        const char *name;
 267        int flags;
 268} chip_table[] = {
 269        {0x0000, "LANCE 7990",                          /* Ancient lance chip.  */
 270                LANCE_MUST_PAD + LANCE_MUST_UNRESET},
 271        {0x0003, "PCnet/ISA 79C960",            /* 79C960 PCnet/ISA.  */
 272                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 273                        LANCE_HAS_MISSED_FRAME},
 274        {0x2260, "PCnet/ISA+ 79C961",           /* 79C961 PCnet/ISA+, Plug-n-Play.  */
 275                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 276                        LANCE_HAS_MISSED_FRAME},
 277        {0x2420, "PCnet/PCI 79C970",            /* 79C970 or 79C974 PCnet-SCSI, PCI. */
 278                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 279                        LANCE_HAS_MISSED_FRAME},
 280        /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
 281                it the PCnet32. */
 282        {0x2430, "PCnet32",                                     /* 79C965 PCnet for VL bus. */
 283                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 284                        LANCE_HAS_MISSED_FRAME},
  285        {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCnet/PCI II. */
 286                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 287                        LANCE_HAS_MISSED_FRAME},
 288        {0x0,    "PCnet (unknown)",
 289                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
 290                        LANCE_HAS_MISSED_FRAME},
 291};
 292
 293enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
 294
 295
 296/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
 297   Assume yes until we know the memory size. */
 298static unsigned char lance_need_isa_bounce_buffers = 1;
 299
 300static int lance_open(struct net_device *dev);
 301static void lance_init_ring(struct net_device *dev, gfp_t mode);
 302static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 303                                    struct net_device *dev);
 304static int lance_rx(struct net_device *dev);
 305static irqreturn_t lance_interrupt(int irq, void *dev_id);
 306static int lance_close(struct net_device *dev);
 307static struct net_device_stats *lance_get_stats(struct net_device *dev);
 308static void set_multicast_list(struct net_device *dev);
 309static void lance_tx_timeout (struct net_device *dev);
 310
 311
 312
 313#ifdef MODULE
 314#define MAX_CARDS               8       /* Max number of interfaces (cards) per module */
 315
 316static struct net_device *dev_lance[MAX_CARDS];
 317static int io[MAX_CARDS];
 318static int dma[MAX_CARDS];
 319static int irq[MAX_CARDS];
 320
 321module_param_array(io, int, NULL, 0);
 322module_param_array(dma, int, NULL, 0);
 323module_param_array(irq, int, NULL, 0);
 324module_param(lance_debug, int, 0);
  325MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
 326MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
 327MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
 328MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
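/* Typical module usage (the values here are examples only, not defaults):
   modprobe lance io=0x300,0x320 irq=5,9 dma=5,6
   io= is required; irq= and dma= may be left at 0 to let the driver probe. */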
 329
 330int __init init_module(void)
 331{
 332        struct net_device *dev;
 333        int this_dev, found = 0;
 334
 335        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 336                if (io[this_dev] == 0)  {
 337                        if (this_dev != 0) /* only complain once */
 338                                break;
 339                        printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
 340                        return -EPERM;
 341                }
 342                dev = alloc_etherdev(0);
 343                if (!dev)
 344                        break;
 345                dev->irq = irq[this_dev];
 346                dev->base_addr = io[this_dev];
 347                dev->dma = dma[this_dev];
 348                if (do_lance_probe(dev) == 0) {
 349                        dev_lance[found++] = dev;
 350                        continue;
 351                }
 352                free_netdev(dev);
 353                break;
 354        }
 355        if (found != 0)
 356                return 0;
 357        return -ENXIO;
 358}
 359
 360static void cleanup_card(struct net_device *dev)
 361{
 362        struct lance_private *lp = dev->ml_priv;
 363        if (dev->dma != 4)
 364                free_dma(dev->dma);
 365        release_region(dev->base_addr, LANCE_TOTAL_SIZE);
 366        kfree(lp->tx_bounce_buffs);
 367        kfree((void*)lp->rx_buffs);
 368        kfree(lp);
 369}
 370
 371void __exit cleanup_module(void)
 372{
 373        int this_dev;
 374
 375        for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
 376                struct net_device *dev = dev_lance[this_dev];
 377                if (dev) {
 378                        unregister_netdev(dev);
 379                        cleanup_card(dev);
 380                        free_netdev(dev);
 381                }
 382        }
 383}
 384#endif /* MODULE */
 385MODULE_LICENSE("GPL");
 386
 387
  388/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other board
  389   probes, now that kmalloc() can allocate ISA DMA-able regions.
  390   This also allows the LANCE driver to be used as a module.
  391   */
 392static int __init do_lance_probe(struct net_device *dev)
 393{
 394        unsigned int *port;
 395        int result;
 396
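        /* If every page of physical memory already lies below the 16MB ISA
           DMA limit, any buffer we allocate is reachable by the LANCE and the
           Tx bounce buffers can be skipped. */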
 397        if (high_memory <= phys_to_virt(16*1024*1024))
 398                lance_need_isa_bounce_buffers = 0;
 399
 400        for (port = lance_portlist; *port; port++) {
 401                int ioaddr = *port;
 402                struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
 403                                                        "lance-probe");
 404
 405                if (r) {
 406                        /* Detect the card with minimal I/O reads */
 407                        char offset14 = inb(ioaddr + 14);
 408                        int card;
 409                        for (card = 0; card < NUM_CARDS; ++card)
 410                                if (cards[card].id_offset14 == offset14)
 411                                        break;
 412                        if (card < NUM_CARDS) {/*yes, the first byte matches*/
 413                                char offset15 = inb(ioaddr + 15);
 414                                for (card = 0; card < NUM_CARDS; ++card)
 415                                        if ((cards[card].id_offset14 == offset14) &&
 416                                                (cards[card].id_offset15 == offset15))
 417                                                break;
 418                        }
 419                        if (card < NUM_CARDS) { /*Signature OK*/
 420                                result = lance_probe1(dev, ioaddr, 0, 0);
 421                                if (!result) {
 422                                        struct lance_private *lp = dev->ml_priv;
 423                                        int ver = lp->chip_version;
 424
 425                                        r->name = chip_table[ver].name;
 426                                        return 0;
 427                                }
 428                        }
 429                        release_region(ioaddr, LANCE_TOTAL_SIZE);
 430                }
 431        }
 432        return -ENODEV;
 433}
 434
 435#ifndef MODULE
 436struct net_device * __init lance_probe(int unit)
 437{
 438        struct net_device *dev = alloc_etherdev(0);
 439        int err;
 440
 441        if (!dev)
 442                return ERR_PTR(-ENODEV);
 443
 444        sprintf(dev->name, "eth%d", unit);
 445        netdev_boot_setup_check(dev);
 446
 447        err = do_lance_probe(dev);
 448        if (err)
 449                goto out;
 450        return dev;
 451out:
 452        free_netdev(dev);
 453        return ERR_PTR(err);
 454}
 455#endif
 456
 457static const struct net_device_ops lance_netdev_ops = {
 458        .ndo_open               = lance_open,
 459        .ndo_start_xmit         = lance_start_xmit,
 460        .ndo_stop               = lance_close,
 461        .ndo_get_stats          = lance_get_stats,
 462        .ndo_set_rx_mode        = set_multicast_list,
 463        .ndo_tx_timeout         = lance_tx_timeout,
 464        .ndo_change_mtu         = eth_change_mtu,
 465        .ndo_set_mac_address    = eth_mac_addr,
 466        .ndo_validate_addr      = eth_validate_addr,
 467};
 468
 469static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
 470{
 471        struct lance_private *lp;
 472        unsigned long dma_channels;     /* Mark spuriously-busy DMA channels */
 473        int i, reset_val, lance_version;
 474        const char *chipname;
 475        /* Flags for specific chips or boards. */
 476        unsigned char hpJ2405A = 0;     /* HP ISA adaptor */
 477        int hp_builtin = 0;             /* HP on-board ethernet. */
 478        static int did_version;         /* Already printed version info. */
 479        unsigned long flags;
 480        int err = -ENOMEM;
 481        void __iomem *bios;
 482
 483        /* First we look for special cases.
 484           Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
 485           There are two HP versions, check the BIOS for the configuration port.
 486           This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
 487           */
 488        bios = ioremap(0xf00f0, 0x14);
 489        if (!bios)
 490                return -ENOMEM;
 491        if (readw(bios + 0x12) == 0x5048)  {
 492                static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
 493                int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
 494                /* We can have boards other than the built-in!  Verify this is on-board. */
 495                if ((inb(hp_port) & 0xc0) == 0x80 &&
 496                    ioaddr_table[inb(hp_port) & 3] == ioaddr)
 497                        hp_builtin = hp_port;
 498        }
 499        iounmap(bios);
 500        /* We also recognize the HP Vectra on-board here, but check below. */
 501        hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
 502                    inb(ioaddr+2) == 0x09);
 503
 504        /* Reset the LANCE.      */
 505        reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
 506
  507        /* The un-reset is only needed for the real NE2100, and will
 508           confuse the HP board. */
 509        if (!hpJ2405A)
 510                outw(reset_val, ioaddr+LANCE_RESET);
 511
  512        outw(0x0000, ioaddr+LANCE_ADDR); /* Select CSR0. */
 513        if (inw(ioaddr+LANCE_DATA) != 0x0004)
 514                return -ENODEV;
 515
 516        /* Get the version of the chip. */
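        /* Newer PCnet parts report a chip ID in CSR88/CSR89; the original
           7990 implements only CSR0-CSR3, so the write of 88 to the register
           address port below typically won't read back as written and we
           fall through to version 0. */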
 517        outw(88, ioaddr+LANCE_ADDR);
 518        if (inw(ioaddr+LANCE_ADDR) != 88) {
 519                lance_version = 0;
 520        } else {                        /* Good, it's a newer chip. */
 521                int chip_version = inw(ioaddr+LANCE_DATA);
 522                outw(89, ioaddr+LANCE_ADDR);
 523                chip_version |= inw(ioaddr+LANCE_DATA) << 16;
 524                if (lance_debug > 2)
 525                        printk("  LANCE chip version is %#x.\n", chip_version);
 526                if ((chip_version & 0xfff) != 0x003)
 527                        return -ENODEV;
 528                chip_version = (chip_version >> 12) & 0xffff;
 529                for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
 530                        if (chip_table[lance_version].id_number == chip_version)
 531                                break;
 532                }
 533        }
 534
  535        /* We can't allocate private data from alloc_etherdev() because it must
  536           be an ISA DMA-able region. */
 537        chipname = chip_table[lance_version].name;
 538        printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
 539
 540        /* There is a 16 byte station address PROM at the base address.
 541           The first six bytes are the station address. */
 542        for (i = 0; i < 6; i++)
 543                dev->dev_addr[i] = inb(ioaddr + i);
 544        printk("%pM", dev->dev_addr);
 545
 546        dev->base_addr = ioaddr;
 547        /* Make certain the data structures used by the LANCE are aligned and DMAble. */
 548
 549        lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
 550        if (!lp)
 551                return -ENOMEM;
 552        if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
 553        dev->ml_priv = lp;
 554        lp->name = chipname;
 555        lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
 556                                                  GFP_DMA | GFP_KERNEL);
 557        if (!lp->rx_buffs)
 558                goto out_lp;
 559        if (lance_need_isa_bounce_buffers) {
 560                lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
 561                                                  GFP_DMA | GFP_KERNEL);
 562                if (!lp->tx_bounce_buffs)
 563                        goto out_rx;
 564        } else
 565                lp->tx_bounce_buffs = NULL;
 566
 567        lp->chip_version = lance_version;
 568        spin_lock_init(&lp->devlock);
 569
 570        lp->init_block.mode = 0x0003;           /* Disable Rx and Tx. */
 571        for (i = 0; i < 6; i++)
 572                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 573        lp->init_block.filter[0] = 0x00000000;
 574        lp->init_block.filter[1] = 0x00000000;
 575        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 576        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 577
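        /* Point the chip at the initialization block: CSR1 takes the low 16
           bits of its bus address, CSR2 the high bits.  (The inw() of
           LANCE_ADDR after each select appears to serve only as a small
           settling delay.) */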
 578        outw(0x0001, ioaddr+LANCE_ADDR);
 579        inw(ioaddr+LANCE_ADDR);
 580        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 581        outw(0x0002, ioaddr+LANCE_ADDR);
 582        inw(ioaddr+LANCE_ADDR);
 583        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 584        outw(0x0000, ioaddr+LANCE_ADDR);
 585        inw(ioaddr+LANCE_ADDR);
 586
 587        if (irq) {                                      /* Set iff PCI card. */
 588                dev->dma = 4;                   /* Native bus-master, no DMA channel needed. */
 589                dev->irq = irq;
 590        } else if (hp_builtin) {
 591                static const char dma_tbl[4] = {3, 5, 6, 0};
 592                static const char irq_tbl[4] = {3, 4, 5, 9};
 593                unsigned char port_val = inb(hp_builtin);
 594                dev->dma = dma_tbl[(port_val >> 4) & 3];
 595                dev->irq = irq_tbl[(port_val >> 2) & 3];
 596                printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
 597        } else if (hpJ2405A) {
 598                static const char dma_tbl[4] = {3, 5, 6, 7};
 599                static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
 600                short reset_val = inw(ioaddr+LANCE_RESET);
 601                dev->dma = dma_tbl[(reset_val >> 2) & 3];
 602                dev->irq = irq_tbl[(reset_val >> 4) & 7];
 603                printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
 604        } else if (lance_version == PCNET_ISAP) {               /* The plug-n-play version. */
 605                short bus_info;
 606                outw(8, ioaddr+LANCE_ADDR);
 607                bus_info = inw(ioaddr+LANCE_BUS_IF);
 608                dev->dma = bus_info & 0x07;
 609                dev->irq = (bus_info >> 4) & 0x0F;
 610        } else {
 611                /* The DMA channel may be passed in PARAM1. */
 612                if (dev->mem_start & 0x07)
 613                        dev->dma = dev->mem_start & 0x07;
 614        }
 615
 616        if (dev->dma == 0) {
 617                /* Read the DMA channel status register, so that we can avoid
 618                   stuck DMA channels in the DMA detection below. */
 619                dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
 620                        (inb(DMA2_STAT_REG) & 0xf0);
 621        }
 622        err = -ENODEV;
 623        if (dev->irq >= 2)
 624                printk(" assigned IRQ %d", dev->irq);
 625        else if (lance_version != 0)  { /* 7990 boards need DMA detection first. */
 626                unsigned long irq_mask;
 627
 628                /* To auto-IRQ we enable the initialization-done and DMA error
 629                   interrupts. For ISA boards we get a DMA error, but VLB and PCI
 630                   boards will work. */
 631                irq_mask = probe_irq_on();
 632
 633                /* Trigger an initialization just for the interrupt. */
 634                outw(0x0041, ioaddr+LANCE_DATA);
 635
 636                mdelay(20);
 637                dev->irq = probe_irq_off(irq_mask);
 638                if (dev->irq)
 639                        printk(", probed IRQ %d", dev->irq);
 640                else {
 641                        printk(", failed to detect IRQ line.\n");
 642                        goto out_tx;
 643                }
 644
 645                /* Check for the initialization done bit, 0x0100, which means
 646                   that we don't need a DMA channel. */
 647                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 648                        dev->dma = 4;
 649        }
 650
 651        if (dev->dma == 4) {
 652                printk(", no DMA needed.\n");
 653        } else if (dev->dma) {
 654                if (request_dma(dev->dma, chipname)) {
 655                        printk("DMA %d allocation failed.\n", dev->dma);
 656                        goto out_tx;
 657                } else
 658                        printk(", assigned DMA %d.\n", dev->dma);
 659        } else {                        /* OK, we have to auto-DMA. */
 660                for (i = 0; i < 4; i++) {
 661                        static const char dmas[] = { 5, 6, 7, 3 };
 662                        int dma = dmas[i];
 663                        int boguscnt;
 664
 665                        /* Don't enable a permanently busy DMA channel, or the machine
 666                           will hang. */
 667                        if (test_bit(dma, &dma_channels))
 668                                continue;
 669                        outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
 670                        if (request_dma(dma, chipname))
 671                                continue;
 672
 673                        flags=claim_dma_lock();
 674                        set_dma_mode(dma, DMA_MODE_CASCADE);
 675                        enable_dma(dma);
 676                        release_dma_lock(flags);
 677
 678                        /* Trigger an initialization. */
 679                        outw(0x0001, ioaddr+LANCE_DATA);
 680                        for (boguscnt = 100; boguscnt > 0; --boguscnt)
 681                                if (inw(ioaddr+LANCE_DATA) & 0x0900)
 682                                        break;
 683                        if (inw(ioaddr+LANCE_DATA) & 0x0100) {
 684                                dev->dma = dma;
 685                                printk(", DMA %d.\n", dev->dma);
 686                                break;
 687                        } else {
 688                                flags=claim_dma_lock();
 689                                disable_dma(dma);
 690                                release_dma_lock(flags);
 691                                free_dma(dma);
 692                        }
 693                }
 694                if (i == 4) {                   /* Failure: bail. */
 695                        printk("DMA detection failed.\n");
 696                        goto out_tx;
 697                }
 698        }
 699
 700        if (lance_version == 0 && dev->irq == 0) {
 701                /* We may auto-IRQ now that we have a DMA channel. */
 702                /* Trigger an initialization just for the interrupt. */
 703                unsigned long irq_mask;
 704
 705                irq_mask = probe_irq_on();
 706                outw(0x0041, ioaddr+LANCE_DATA);
 707
 708                mdelay(40);
 709                dev->irq = probe_irq_off(irq_mask);
 710                if (dev->irq == 0) {
 711                        printk("  Failed to detect the 7990 IRQ line.\n");
 712                        goto out_dma;
 713                }
 714                printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
 715        }
 716
 717        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 718                /* Turn on auto-select of media (10baseT or BNC) so that the user
 719                   can watch the LEDs even if the board isn't opened. */
 720                outw(0x0002, ioaddr+LANCE_ADDR);
 721                /* Don't touch 10base2 power bit. */
 722                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 723        }
 724
 725        if (lance_debug > 0  &&  did_version++ == 0)
 726                printk(version);
 727
 728        /* The LANCE-specific entries in the device structure. */
 729        dev->netdev_ops = &lance_netdev_ops;
 730        dev->watchdog_timeo = TX_TIMEOUT;
 731
 732        err = register_netdev(dev);
 733        if (err)
 734                goto out_dma;
 735        return 0;
 736out_dma:
 737        if (dev->dma != 4)
 738                free_dma(dev->dma);
 739out_tx:
 740        kfree(lp->tx_bounce_buffs);
 741out_rx:
 742        kfree((void*)lp->rx_buffs);
 743out_lp:
 744        kfree(lp);
 745        return err;
 746}
 747
 748
 749static int
 750lance_open(struct net_device *dev)
 751{
 752        struct lance_private *lp = dev->ml_priv;
 753        int ioaddr = dev->base_addr;
 754        int i;
 755
 756        if (dev->irq == 0 ||
 757                request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
 758                return -EAGAIN;
 759        }
 760
 761        /* We used to allocate DMA here, but that was silly.
 762           DMA lines can't be shared!  We now permanently allocate them. */
 763
 764        /* Reset the LANCE */
 765        inw(ioaddr+LANCE_RESET);
 766
 767        /* The DMA controller is used as a no-operation slave, "cascade mode". */
 768        if (dev->dma != 4) {
 769                unsigned long flags=claim_dma_lock();
 770                enable_dma(dev->dma);
 771                set_dma_mode(dev->dma, DMA_MODE_CASCADE);
 772                release_dma_lock(flags);
 773        }
 774
 775        /* Un-Reset the LANCE, needed only for the NE2100. */
 776        if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
 777                outw(0, ioaddr+LANCE_RESET);
 778
 779        if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
 780                /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
 781                outw(0x0002, ioaddr+LANCE_ADDR);
 782                /* Only touch autoselect bit. */
 783                outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 784        }
 785
 786        if (lance_debug > 1)
 787                printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
 788                           dev->name, dev->irq, dev->dma,
 789                           (u32) isa_virt_to_bus(lp->tx_ring),
 790                           (u32) isa_virt_to_bus(lp->rx_ring),
 791                           (u32) isa_virt_to_bus(&lp->init_block));
 792
 793        lance_init_ring(dev, GFP_KERNEL);
 794        /* Re-initialize the LANCE, and start it when done. */
 795        outw(0x0001, ioaddr+LANCE_ADDR);
 796        outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
 797        outw(0x0002, ioaddr+LANCE_ADDR);
 798        outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
 799
 800        outw(0x0004, ioaddr+LANCE_ADDR);
 801        outw(0x0915, ioaddr+LANCE_DATA);
 802
 803        outw(0x0000, ioaddr+LANCE_ADDR);
 804        outw(0x0001, ioaddr+LANCE_DATA);
 805
 806        netif_start_queue (dev);
 807
 808        i = 0;
 809        while (i++ < 100)
 810                if (inw(ioaddr+LANCE_DATA) & 0x0100)
 811                        break;
 812        /*
 813         * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
 814         * reports that doing so triggers a bug in the '974.
 815         */
 816        outw(0x0042, ioaddr+LANCE_DATA);
 817
 818        if (lance_debug > 2)
 819                printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
 820                           dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
 821
 822        return 0;                                       /* Always succeed */
 823}
 824
 825/* The LANCE has been halted for one reason or another (busmaster memory
 826   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 827   etc.).  Modern LANCE variants always reload their ring-buffer
 828   configuration when restarted, so we must reinitialize our ring
 829   context before restarting.  As part of this reinitialization,
 830   find all packets still on the Tx ring and pretend that they had been
 831   sent (in effect, drop the packets on the floor) - the higher-level
 832   protocols will time out and retransmit.  It'd be better to shuffle
 833   these skbs to a temp list and then actually re-Tx them after
 834   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
 835*/
 836
 837static void
 838lance_purge_ring(struct net_device *dev)
 839{
 840        struct lance_private *lp = dev->ml_priv;
 841        int i;
 842
 843        /* Free all the skbuffs in the Rx and Tx queues. */
 844        for (i = 0; i < RX_RING_SIZE; i++) {
 845                struct sk_buff *skb = lp->rx_skbuff[i];
 846                lp->rx_skbuff[i] = NULL;
 847                lp->rx_ring[i].base = 0;                /* Not owned by LANCE chip. */
 848                if (skb)
 849                        dev_kfree_skb_any(skb);
 850        }
 851        for (i = 0; i < TX_RING_SIZE; i++) {
 852                if (lp->tx_skbuff[i]) {
 853                        dev_kfree_skb_any(lp->tx_skbuff[i]);
 854                        lp->tx_skbuff[i] = NULL;
 855                }
 856        }
 857}
 858
 859
 860/* Initialize the LANCE Rx and Tx rings. */
 861static void
 862lance_init_ring(struct net_device *dev, gfp_t gfp)
 863{
 864        struct lance_private *lp = dev->ml_priv;
 865        int i;
 866
 867        lp->cur_rx = lp->cur_tx = 0;
 868        lp->dirty_rx = lp->dirty_tx = 0;
 869
 870        for (i = 0; i < RX_RING_SIZE; i++) {
 871                struct sk_buff *skb;
 872                void *rx_buff;
 873
 874                skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
 875                lp->rx_skbuff[i] = skb;
 876                if (skb)
 877                        rx_buff = skb->data;
 878                else
 879                        rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 880                if (rx_buff == NULL)
 881                        lp->rx_ring[i].base = 0;
 882                else
 883                        lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
 884                lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 885        }
 886        /* The Tx buffer address is filled in as needed, but we do need to clear
 887           the upper ownership bit. */
 888        for (i = 0; i < TX_RING_SIZE; i++) {
 889                lp->tx_skbuff[i] = NULL;
 890                lp->tx_ring[i].base = 0;
 891        }
 892
 893        lp->init_block.mode = 0x0000;
 894        for (i = 0; i < 6; i++)
 895                lp->init_block.phys_addr[i] = dev->dev_addr[i];
 896        lp->init_block.filter[0] = 0x00000000;
 897        lp->init_block.filter[1] = 0x00000000;
 898        lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
 899        lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
 900}
 901
 902static void
 903lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
 904{
 905        struct lance_private *lp = dev->ml_priv;
 906
 907        if (must_reinit ||
 908                (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
 909                lance_purge_ring(dev);
 910                lance_init_ring(dev, GFP_ATOMIC);
 911        }
 912        outw(0x0000,    dev->base_addr + LANCE_ADDR);
 913        outw(csr0_bits, dev->base_addr + LANCE_DATA);
 914}
 915
 916
 917static void lance_tx_timeout (struct net_device *dev)
 918{
 919        struct lance_private *lp = (struct lance_private *) dev->ml_priv;
 920        int ioaddr = dev->base_addr;
 921
 922        outw (0, ioaddr + LANCE_ADDR);
 923        printk ("%s: transmit timed out, status %4.4x, resetting.\n",
 924                dev->name, inw (ioaddr + LANCE_DATA));
 925        outw (0x0004, ioaddr + LANCE_DATA);
 926        dev->stats.tx_errors++;
 927#ifndef final_version
 928        if (lance_debug > 3) {
 929                int i;
 930                printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
 931                  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
 932                        lp->cur_rx);
 933                for (i = 0; i < RX_RING_SIZE; i++)
 934                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 935                         lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
 936                                lp->rx_ring[i].msg_length);
 937                for (i = 0; i < TX_RING_SIZE; i++)
 938                        printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 939                             lp->tx_ring[i].base, -lp->tx_ring[i].length,
 940                                lp->tx_ring[i].misc);
 941                printk ("\n");
 942        }
 943#endif
 944        lance_restart (dev, 0x0043, 1);
 945
 946        netif_trans_update(dev); /* prevent tx timeout */
 947        netif_wake_queue (dev);
 948}
 949
 950
 951static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 952                                    struct net_device *dev)
 953{
 954        struct lance_private *lp = dev->ml_priv;
 955        int ioaddr = dev->base_addr;
 956        int entry;
 957        unsigned long flags;
 958
 959        spin_lock_irqsave(&lp->devlock, flags);
 960
 961        if (lance_debug > 3) {
 962                outw(0x0000, ioaddr+LANCE_ADDR);
 963                printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
 964                           inw(ioaddr+LANCE_DATA));
 965                outw(0x0000, ioaddr+LANCE_DATA);
 966        }
 967
 968        /* Fill in a Tx ring entry */
 969
 970        /* Mask to ring buffer boundary. */
 971        entry = lp->cur_tx & TX_RING_MOD_MASK;
 972
 973        /* Caution: the write order is important here, set the base address
 974           with the "ownership" bits last. */
 975
  976        /* The old LANCE chips don't automatically pad buffers to the minimum size. */
 977        if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
 978                if (skb->len < ETH_ZLEN) {
 979                        if (skb_padto(skb, ETH_ZLEN))
 980                                goto out;
 981                        lp->tx_ring[entry].length = -ETH_ZLEN;
 982                }
 983                else
 984                        lp->tx_ring[entry].length = -skb->len;
 985        } else
 986                lp->tx_ring[entry].length = -skb->len;
 987
 988        lp->tx_ring[entry].misc = 0x0000;
 989
 990        dev->stats.tx_bytes += skb->len;
 991
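        /* Whichever branch below is taken, the descriptor is handed to the
           chip by writing the buffer's 24-bit bus address together with
           0x83000000, i.e. OWN | STP | ENP: the frame occupies a single
           descriptor and belongs to the LANCE until it has been sent. */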
 992        /* If any part of this buffer is >16M we must copy it to a low-memory
 993           buffer. */
 994        if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
 995                if (lance_debug > 5)
 996                        printk("%s: bouncing a high-memory packet (%#x).\n",
 997                                   dev->name, (u32)isa_virt_to_bus(skb->data));
 998                skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
 999                lp->tx_ring[entry].base =
1000                        ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1001                dev_kfree_skb(skb);
1002        } else {
1003                lp->tx_skbuff[entry] = skb;
1004                lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1005        }
1006        lp->cur_tx++;
1007
1008        /* Trigger an immediate send poll. */
1009        outw(0x0000, ioaddr+LANCE_ADDR);
1010        outw(0x0048, ioaddr+LANCE_DATA);
1011
1012        if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1013                netif_stop_queue(dev);
1014
1015out:
1016        spin_unlock_irqrestore(&lp->devlock, flags);
1017        return NETDEV_TX_OK;
1018}
1019
1020/* The LANCE interrupt handler. */
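/* CSR0 bits touched below (per the AMD datasheets): 0x8000 ERR (summary),
   0x4000 BABL, 0x1000 MISS, 0x0800 MERR, 0x0400 RINT, 0x0200 TINT,
   0x0100 IDON, 0x0040 INEA, 0x0004 STOP, 0x0002 STRT, 0x0001 INIT.  The
   handler loops while any of ERR|RINT|TINT (0x8600) is set; the interrupt
   bits are cleared by writing them back as ones, which is what the
   acknowledge write below does (the control bits are masked out first). */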
1021static irqreturn_t lance_interrupt(int irq, void *dev_id)
1022{
1023        struct net_device *dev = dev_id;
1024        struct lance_private *lp;
1025        int csr0, ioaddr, boguscnt=10;
1026        int must_restart;
1027
1028        ioaddr = dev->base_addr;
1029        lp = dev->ml_priv;
1030
1031        spin_lock (&lp->devlock);
1032
1033        outw(0x00, dev->base_addr + LANCE_ADDR);
1034        while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1035               --boguscnt >= 0) {
1036                /* Acknowledge all of the current interrupt sources ASAP. */
1037                outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1038
1039                must_restart = 0;
1040
1041                if (lance_debug > 5)
1042                        printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1043                                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1044
1045                if (csr0 & 0x0400)                      /* Rx interrupt */
1046                        lance_rx(dev);
1047
1048                if (csr0 & 0x0200) {            /* Tx-done interrupt */
1049                        int dirty_tx = lp->dirty_tx;
1050
1051                        while (dirty_tx < lp->cur_tx) {
1052                                int entry = dirty_tx & TX_RING_MOD_MASK;
1053                                int status = lp->tx_ring[entry].base;
1054
1055                                if (status < 0)
1056                                        break;                  /* It still hasn't been Txed */
1057
1058                                lp->tx_ring[entry].base = 0;
1059
1060                                if (status & 0x40000000) {
 1061                                        /* There was a major error, log it. */
1062                                        int err_status = lp->tx_ring[entry].misc;
1063                                        dev->stats.tx_errors++;
1064                                        if (err_status & 0x0400)
1065                                                dev->stats.tx_aborted_errors++;
1066                                        if (err_status & 0x0800)
1067                                                dev->stats.tx_carrier_errors++;
1068                                        if (err_status & 0x1000)
1069                                                dev->stats.tx_window_errors++;
1070                                        if (err_status & 0x4000) {
1071                                                /* Ackk!  On FIFO errors the Tx unit is turned off! */
1072                                                dev->stats.tx_fifo_errors++;
1073                                                /* Remove this verbosity later! */
1074                                                printk("%s: Tx FIFO error! Status %4.4x.\n",
1075                                                           dev->name, csr0);
1076                                                /* Restart the chip. */
1077                                                must_restart = 1;
1078                                        }
1079                                } else {
1080                                        if (status & 0x18000000)
1081                                                dev->stats.collisions++;
1082                                        dev->stats.tx_packets++;
1083                                }
1084
1085                                /* We must free the original skb if it's not a data-only copy
1086                                   in the bounce buffer. */
1087                                if (lp->tx_skbuff[entry]) {
1088                                        dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1089                                        lp->tx_skbuff[entry] = NULL;
1090                                }
1091                                dirty_tx++;
1092                        }
1093
1094#ifndef final_version
1095                        if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1096                                printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1097                                           dirty_tx, lp->cur_tx,
1098                                           netif_queue_stopped(dev) ? "yes" : "no");
1099                                dirty_tx += TX_RING_SIZE;
1100                        }
1101#endif
1102
1103                        /* if the ring is no longer full, accept more packets */
1104                        if (netif_queue_stopped(dev) &&
1105                            dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1106                                netif_wake_queue (dev);
1107
1108                        lp->dirty_tx = dirty_tx;
1109                }
1110
1111                /* Log misc errors. */
1112                if (csr0 & 0x4000)
1113                        dev->stats.tx_errors++; /* Tx babble. */
1114                if (csr0 & 0x1000)
1115                        dev->stats.rx_errors++; /* Missed a Rx frame. */
1116                if (csr0 & 0x0800) {
1117                        printk("%s: Bus master arbitration failure, status %4.4x.\n",
1118                                   dev->name, csr0);
1119                        /* Restart the chip. */
1120                        must_restart = 1;
1121                }
1122
1123                if (must_restart) {
1124                        /* stop the chip to clear the error condition, then restart */
1125                        outw(0x0000, dev->base_addr + LANCE_ADDR);
1126                        outw(0x0004, dev->base_addr + LANCE_DATA);
1127                        lance_restart(dev, 0x0002, 0);
1128                }
1129        }
1130
1131        /* Clear any other interrupt, and set interrupt enable. */
1132        outw(0x0000, dev->base_addr + LANCE_ADDR);
1133        outw(0x7940, dev->base_addr + LANCE_DATA);
1134
1135        if (lance_debug > 4)
1136                printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1137                           dev->name, inw(ioaddr + LANCE_ADDR),
1138                           inw(dev->base_addr + LANCE_DATA));
1139
1140        spin_unlock (&lp->devlock);
1141        return IRQ_HANDLED;
1142}
1143
1144static int
1145lance_rx(struct net_device *dev)
1146{
1147        struct lance_private *lp = dev->ml_priv;
1148        int entry = lp->cur_rx & RX_RING_MOD_MASK;
1149        int i;
1150
1151        /* If we own the next entry, it's a new packet. Send it up. */
1152        while (lp->rx_ring[entry].base >= 0) {
1153                int status = lp->rx_ring[entry].base >> 24;
1154
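                /* 'status' is the descriptor's top byte: 0x80 OWN (already
                   known clear here, since 'base' was non-negative), 0x40 ERR,
                   0x20 FRAM, 0x10 OFLO, 0x08 CRC, 0x04 BUFF, 0x02 STP,
                   0x01 ENP.  Exactly 0x03 therefore means a complete,
                   error-free packet held in a single buffer. */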
1155                if (status != 0x03) {                   /* There was an error. */
1156                        /* There is a tricky error noted by John Murphy,
1157                           <murf@perftech.com> to Russ Nelson: Even with full-sized
1158                           buffers it's possible for a jabber packet to use two
1159                           buffers, with only the last correctly noting the error. */
1160                        if (status & 0x01)      /* Only count a general error at the */
1161                                dev->stats.rx_errors++; /* end of a packet.*/
1162                        if (status & 0x20)
1163                                dev->stats.rx_frame_errors++;
1164                        if (status & 0x10)
1165                                dev->stats.rx_over_errors++;
1166                        if (status & 0x08)
1167                                dev->stats.rx_crc_errors++;
1168                        if (status & 0x04)
1169                                dev->stats.rx_fifo_errors++;
1170                        lp->rx_ring[entry].base &= 0x03ffffff;
1171                }
1172                else
1173                {
1174                        /* Malloc up new buffer, compatible with net3. */
1175                        short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1176                        struct sk_buff *skb;
1177
1178                        if(pkt_len<60)
1179                        {
1180                                printk("%s: Runt packet!\n",dev->name);
1181                                dev->stats.rx_errors++;
1182                        }
1183                        else
1184                        {
1185                                skb = dev_alloc_skb(pkt_len+2);
1186                                if (skb == NULL)
1187                                {
1188                                        printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1189                                        for (i=0; i < RX_RING_SIZE; i++)
1190                                                if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1191                                                        break;
1192
1193                                        if (i > RX_RING_SIZE -2)
1194                                        {
1195                                                dev->stats.rx_dropped++;
1196                                                lp->rx_ring[entry].base |= 0x80000000;
1197                                                lp->cur_rx++;
1198                                        }
1199                                        break;
1200                                }
1201                                skb_reserve(skb,2);     /* 16 byte align */
1202                                skb_put(skb,pkt_len);   /* Make room */
1203                                skb_copy_to_linear_data(skb,
1204                                        (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1205                                        pkt_len);
1206                                skb->protocol=eth_type_trans(skb,dev);
1207                                netif_rx(skb);
1208                                dev->stats.rx_packets++;
1209                                dev->stats.rx_bytes += pkt_len;
1210                        }
1211                }
1212                /* The docs say that the buffer length isn't touched, but Andrew Boyd
1213                   of QNX reports that some revs of the 79C965 clear it. */
1214                lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1215                lp->rx_ring[entry].base |= 0x80000000;
1216                entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1217        }
1218
1219        /* We should check that at least two ring entries are free.      If not,
1220           we should free one and mark stats->rx_dropped++. */
1221
1222        return 0;
1223}
1224
1225static int
1226lance_close(struct net_device *dev)
1227{
1228        int ioaddr = dev->base_addr;
1229        struct lance_private *lp = dev->ml_priv;
1230
1231        netif_stop_queue (dev);
1232
1233        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1234                outw(112, ioaddr+LANCE_ADDR);
1235                dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1236        }
1237        outw(0, ioaddr+LANCE_ADDR);
1238
1239        if (lance_debug > 1)
1240                printk("%s: Shutting down ethercard, status was %2.2x.\n",
1241                           dev->name, inw(ioaddr+LANCE_DATA));
1242
1243        /* We stop the LANCE here -- it occasionally polls
1244           memory if we don't. */
1245        outw(0x0004, ioaddr+LANCE_DATA);
1246
1247        if (dev->dma != 4)
1248        {
1249                unsigned long flags=claim_dma_lock();
1250                disable_dma(dev->dma);
1251                release_dma_lock(flags);
1252        }
1253        free_irq(dev->irq, dev);
1254
1255        lance_purge_ring(dev);
1256
1257        return 0;
1258}
1259
1260static struct net_device_stats *lance_get_stats(struct net_device *dev)
1261{
1262        struct lance_private *lp = dev->ml_priv;
1263
1264        if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1265                short ioaddr = dev->base_addr;
1266                short saved_addr;
1267                unsigned long flags;
1268
1269                spin_lock_irqsave(&lp->devlock, flags);
1270                saved_addr = inw(ioaddr+LANCE_ADDR);
1271                outw(112, ioaddr+LANCE_ADDR);
1272                dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1273                outw(saved_addr, ioaddr+LANCE_ADDR);
1274                spin_unlock_irqrestore(&lp->devlock, flags);
1275        }
1276
1277        return &dev->stats;
1278}
1279
1280/* Set or clear the multicast filter for this adaptor.
1281 */
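/* In LANCE terms this routine programs CSR15, the mode register, whose bit 15
   enables promiscuous reception, and CSR8-CSR11, which hold the 64-bit
   logical address filter; the filter is simply opened completely or closed,
   rather than hashing individual multicast addresses. */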
1282
1283static void set_multicast_list(struct net_device *dev)
1284{
1285        short ioaddr = dev->base_addr;
1286
1287        outw(0, ioaddr+LANCE_ADDR);
1288        outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
1289
1290        if (dev->flags&IFF_PROMISC) {
1291                outw(15, ioaddr+LANCE_ADDR);
1292                outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1293        } else {
1294                short multicast_table[4];
1295                int i;
1296                int num_addrs=netdev_mc_count(dev);
1297                if(dev->flags&IFF_ALLMULTI)
1298                        num_addrs=1;
1299                /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1300                memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1301                for (i = 0; i < 4; i++) {
1302                        outw(8 + i, ioaddr+LANCE_ADDR);
1303                        outw(multicast_table[i], ioaddr+LANCE_DATA);
1304                }
1305                outw(15, ioaddr+LANCE_ADDR);
1306                outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1307        }
1308
1309        lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1310
1311}
1312
1313