linux/drivers/net/ethernet/amd/declance.c
   1/*
   2 *    Lance ethernet driver for the MIPS processor based
   3 *      DECstation family
   4 *
   5 *
   6 *      adapted from sunlance.c by Richard van den Berg
   7 *
   8 *      Copyright (C) 2002, 2003, 2005, 2006  Maciej W. Rozycki
   9 *
  10 *      additional sources:
  11 *      - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
  12 *        Revision 1.2
  13 *
  14 *      History:
  15 *
  16 *      v0.001: The kernel accepts the code and it shows the hardware address.
  17 *
  18 *      v0.002: Removed most sparc stuff, left only some module and dma stuff.
  19 *
  20 *      v0.003: Enhanced base address calculation from proposals by
  21 *              Harald Koerfgen and Thomas Riemer.
  22 *
  23 *      v0.004: lance-regs is pointing at the right addresses, added prom
  24 *              check. First start of address mapping and DMA.
  25 *
  26 *      v0.005: started to play around with LANCE-DMA. This driver will not
  27 *              work for non IOASIC lances. HK
  28 *
  29 *      v0.006: added pointer arrays to lance_private and setup routine for
  30 *              them in dec_lance_init. HK
  31 *
  32 *      v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
  33 *              access the init block. This looks like one (short) word at a
  34 *              time, but the smallest amount the IOASIC can transfer is a
  35 *              (long) word. So we have a 2-2 padding here. Changed
  36 *              lance_init_block accordingly. The 16-16 padding for the buffers
  37 *              seems to be correct. HK
  38 *
  39 *      v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
  40 *
  41 *      v0.009: Module support fixes, multiple interfaces support, various
  42 *              bits. macro
  43 *
  44 *      v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
  45 *              PMAX requirement to only use halfword accesses to the
  46 *              buffer. macro
  47 *
  48 *      v0.011: Converted the PMAD to the driver model. macro
  49 */
  50
  51#include <linux/crc32.h>
  52#include <linux/delay.h>
  53#include <linux/errno.h>
  54#include <linux/if_ether.h>
  55#include <linux/init.h>
  56#include <linux/kernel.h>
  57#include <linux/module.h>
  58#include <linux/netdevice.h>
  59#include <linux/etherdevice.h>
  60#include <linux/spinlock.h>
  61#include <linux/stddef.h>
  62#include <linux/string.h>
  63#include <linux/tc.h>
  64#include <linux/types.h>
  65
  66#include <asm/addrspace.h>
  67
  68#include <asm/dec/interrupts.h>
  69#include <asm/dec/ioasic.h>
  70#include <asm/dec/ioasic_addrs.h>
  71#include <asm/dec/kn01.h>
  72#include <asm/dec/machtype.h>
  73#include <asm/dec/system.h>
  74
  75static char version[] __devinitdata =
  76"declance.c: v0.011 by Linux MIPS DECstation task force\n";
  77
  78MODULE_AUTHOR("Linux MIPS DECstation task force");
  79MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
  80MODULE_LICENSE("GPL");
  81
  82#define __unused __attribute__ ((unused))
  83
  84/*
  85 * card types
  86 */
  87#define ASIC_LANCE 1
  88#define PMAD_LANCE 2
  89#define PMAX_LANCE 3
  90
  91
  92#define LE_CSR0 0
  93#define LE_CSR1 1
  94#define LE_CSR2 2
  95#define LE_CSR3 3
  96
  97#define LE_MO_PROM      0x8000  /* Enable promiscuous mode */
  98
  99#define LE_C0_ERR       0x8000  /* Error: set if BAB, SQE, MISS or ME is set */
 100#define LE_C0_BABL      0x4000  /* BAB:  Babble: tx timeout. */
 101#define LE_C0_CERR      0x2000  /* SQE:  Signal quality error */
 102#define LE_C0_MISS      0x1000  /* MISS: Missed a packet */
 103#define LE_C0_MERR      0x0800  /* ME:   Memory error */
 104#define LE_C0_RINT      0x0400  /* Received interrupt */
 105#define LE_C0_TINT      0x0200  /* Transmitter Interrupt */
 106#define LE_C0_IDON      0x0100  /* IFIN: Init finished. */
 107#define LE_C0_INTR      0x0080  /* Interrupt or error */
 108#define LE_C0_INEA      0x0040  /* Interrupt enable */
 109#define LE_C0_RXON      0x0020  /* Receiver on */
 110#define LE_C0_TXON      0x0010  /* Transmitter on */
 111#define LE_C0_TDMD      0x0008  /* Transmitter demand */
 112#define LE_C0_STOP      0x0004  /* Stop the card */
 113#define LE_C0_STRT      0x0002  /* Start the card */
 114#define LE_C0_INIT      0x0001  /* Init the card */
 115
 116#define LE_C3_BSWP      0x4     /* SWAP */
 117#define LE_C3_ACON      0x2     /* ALE Control */
 118#define LE_C3_BCON      0x1     /* Byte control */
 119
 120/* Receive message descriptor 1 */
 121#define LE_R1_OWN       0x8000  /* Who owns the entry */
 122#define LE_R1_ERR       0x4000  /* Error: if FRA, OFL, CRC or BUF is set */
 123#define LE_R1_FRA       0x2000  /* FRA: Frame error */
 124#define LE_R1_OFL       0x1000  /* OFL: Frame overflow */
 125#define LE_R1_CRC       0x0800  /* CRC error */
 126#define LE_R1_BUF       0x0400  /* BUF: Buffer error */
 127#define LE_R1_SOP       0x0200  /* Start of packet */
 128#define LE_R1_EOP       0x0100  /* End of packet */
 129#define LE_R1_POK       0x0300  /* Packet is complete: SOP + EOP */
 130
 131/* Transmit message descriptor 1 */
 132#define LE_T1_OWN       0x8000  /* Lance owns the packet */
 133#define LE_T1_ERR       0x4000  /* Error summary */
 134#define LE_T1_EMORE     0x1000  /* Error: more than one retry needed */
 135#define LE_T1_EONE      0x0800  /* Error: one retry needed */
 136#define LE_T1_EDEF      0x0400  /* Error: deferred */
 137#define LE_T1_SOP       0x0200  /* Start of packet */
 138#define LE_T1_EOP       0x0100  /* End of packet */
 139#define LE_T1_POK       0x0300  /* Packet is complete: SOP + EOP */
 140
 141#define LE_T3_BUF       0x8000  /* Buffer error */
 142#define LE_T3_UFL       0x4000  /* Error underflow */
 143#define LE_T3_LCOL      0x1000  /* Error late collision */
 144#define LE_T3_CLOS      0x0800  /* Error carrier loss */
 145#define LE_T3_RTY       0x0400  /* Error retry */
 146#define LE_T3_TDR       0x03ff  /* Time Domain Reflectometry counter */
 147
 148/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
 149
 150#ifndef LANCE_LOG_TX_BUFFERS
 151#define LANCE_LOG_TX_BUFFERS 4
 152#define LANCE_LOG_RX_BUFFERS 4
 153#endif
 154
 155#define TX_RING_SIZE                    (1 << (LANCE_LOG_TX_BUFFERS))
 156#define TX_RING_MOD_MASK                (TX_RING_SIZE - 1)
 157
 158#define RX_RING_SIZE                    (1 << (LANCE_LOG_RX_BUFFERS))
 159#define RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 160
 161#define PKT_BUF_SZ              1536
 162#define RX_BUFF_SIZE            PKT_BUF_SZ
 163#define TX_BUFF_SIZE            PKT_BUF_SZ
 164
 165#undef TEST_HITS
 166#define ZERO 0
 167
 168/*
 169 * The DS2100/3100 have a linear 64 kB buffer which supports halfword
 170 * accesses only.  Each halfword of the buffer is word-aligned in the
 171 * CPU address space.
 172 *
 173 * The PMAD-AA has a 128 kB buffer on-board.
 174 *
 175 * The IOASIC LANCE devices use a shared memory region.  This region
  176 * as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
  177 * boundary.  The LANCE sees this as a 64 kB long contiguous memory
 178 * region.
 179 *
 180 * The LANCE's DMA address is used as an index in this buffer and DMA
 181 * takes place in bursts of eight 16-bit words which are packed into
 182 * four 32-bit words by the IOASIC.  This leads to a strange padding:
 183 * 16 bytes of valid data followed by a 16 byte gap :-(.
 184 */
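/*
 * Worked example of the IOASIC padding described above (illustration only,
 * not part of the original code):
 *
 *   LANCE byte offsets 0x00-0x0f appear at CPU byte offsets 0x00-0x0f,
 *   CPU offsets 0x10-0x1f are the gap,
 *   LANCE byte offsets 0x10-0x1f appear at CPU byte offsets 0x20-0x2f,
 *   and so on.
 *
 * The CPU address of a suitably aligned object is therefore the LANCE
 * offset doubled; the DS2100/3100 buffer behaves the same way at a 2-byte
 * granularity.  The shift_off()/lib_ptr() macros and the "2 *" factors in
 * the buffer pointer setup in dec_lance_probe() rely on this doubling for
 * the ASIC_LANCE and PMAX_LANCE types.
 */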
 185
 186struct lance_rx_desc {
 187        unsigned short rmd0;            /* low address of packet */
 188        unsigned short rmd1;            /* high address of packet
 189                                           and descriptor bits */
 190        short length;                   /* 2s complement (negative!)
 191                                           of buffer length */
 192        unsigned short mblength;        /* actual number of bytes received */
 193};
 194
 195struct lance_tx_desc {
 196        unsigned short tmd0;            /* low address of packet */
 197        unsigned short tmd1;            /* high address of packet
 198                                           and descriptor bits */
 199        short length;                   /* 2s complement (negative!)
 200                                           of buffer length */
 201        unsigned short misc;
 202};
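/*
 * Example of the length convention (illustration only): a 1536-byte buffer
 * is described as -1536, i.e. 0xfa00.  The ring setup code below also ORs
 * in 0xf000, the bits the chip expects to read back as ones, which for this
 * buffer size happen to be set already.
 */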
 203
 204
 205/* First part of the LANCE initialization block, described in databook. */
 206struct lance_init_block {
 207        unsigned short mode;            /* pre-set mode (reg. 15) */
 208
 209        unsigned short phys_addr[3];    /* physical ethernet address */
 210        unsigned short filter[4];       /* multicast filter */
 211
 212        /* Receive and transmit ring base, along with extra bits. */
 213        unsigned short rx_ptr;          /* receive descriptor addr */
 214        unsigned short rx_len;          /* receive len and high addr */
 215        unsigned short tx_ptr;          /* transmit descriptor addr */
 216        unsigned short tx_len;          /* transmit len and high addr */
 217
 218        short gap[4];
 219
 220        /* The buffer descriptors */
 221        struct lance_rx_desc brx_ring[RX_RING_SIZE];
 222        struct lance_tx_desc btx_ring[TX_RING_SIZE];
 223};
 224
 225#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
 226#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
 227
 228#define shift_off(off, type)                                            \
 229        (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
 230
 231#define lib_off(rt, type)                                               \
 232        shift_off(offsetof(struct lance_init_block, rt), type)
 233
 234#define lib_ptr(ib, rt, type)                                           \
 235        ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
 236
 237#define rds_off(rt, type)                                               \
 238        shift_off(offsetof(struct lance_rx_desc, rt), type)
 239
 240#define rds_ptr(rd, rt, type)                                           \
 241        ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
 242
 243#define tds_off(rt, type)                                               \
 244        shift_off(offsetof(struct lance_tx_desc, rt), type)
 245
 246#define tds_ptr(td, rt, type)                                           \
 247        ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
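/*
 * Example (illustrative): offsetof(struct lance_init_block, rx_ptr) is 16,
 * so *lib_ptr(ib, rx_ptr, type) touches the halfword at byte offset 16 on a
 * PMAD_LANCE but at byte offset 32 on an ASIC_LANCE or PMAX_LANCE, where
 * each halfword of the init block and of the descriptor rings is followed
 * by a 2-byte hole as seen from the CPU.
 */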
 248
 249struct lance_private {
 250        struct net_device *next;
 251        int type;
 252        int dma_irq;
 253        volatile struct lance_regs *ll;
 254
 255        spinlock_t      lock;
 256
 257        int rx_new, tx_new;
 258        int rx_old, tx_old;
 259
 260        unsigned short busmaster_regval;
 261
 262        struct timer_list       multicast_timer;
 263
 264        /* Pointers to the ring buffers as seen from the CPU */
 265        char *rx_buf_ptr_cpu[RX_RING_SIZE];
 266        char *tx_buf_ptr_cpu[TX_RING_SIZE];
 267
 268        /* Pointers to the ring buffers as seen from the LANCE */
 269        uint rx_buf_ptr_lnc[RX_RING_SIZE];
 270        uint tx_buf_ptr_lnc[TX_RING_SIZE];
 271};
 272
 273#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
 274                        lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
 275                        lp->tx_old - lp->tx_new-1)
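/*
 * Worked example (illustration only, with the default ring of 16 entries):
 * tx_old == 3, tx_new == 9 gives 3 + 15 - 9 = 9 free slots; tx_old == 9,
 * tx_new == 3 gives 9 - 3 - 1 = 5.  One slot is always left unused so a
 * completely full ring is not mistaken for an empty one.
 */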
 276
  277/* The lance control ports are at an absolute address, machine and tc-slot
  278 * dependent.
  279 * DECstations only do 32-bit accesses and the LANCE has 16-bit registers,
  280 * so we give the structure an extra padding member to make rap point at
  281 * the right address.
  282 */
 283struct lance_regs {
 284        volatile unsigned short rdp;    /* register data port */
 285        unsigned short pad;
 286        volatile unsigned short rap;    /* register address port */
 287};
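/*
 * Typical access pattern (illustrative; this is how load_csrs() and the
 * interrupt handler below use it): write the CSR number to rap, then read
 * or write the selected register through rdp, e.g.
 *
 *      writereg(&ll->rap, LE_CSR0);
 *      writereg(&ll->rdp, LE_C0_STOP);
 *
 * The pad member keeps rap in the next 32-bit word, as required by the
 * 32-bit-only access described above.
 */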
 288
 289int dec_lance_debug = 2;
 290
 291static struct tc_driver dec_lance_tc_driver;
 292static struct net_device *root_lance_dev;
 293
 294static inline void writereg(volatile unsigned short *regptr, short value)
 295{
 296        *regptr = value;
  297        *regptr = value;
  298        iob();  /* I/O barrier: order the write before the next register access */
 298}
 299
 300/* Load the CSR registers */
 301static void load_csrs(struct lance_private *lp)
 302{
 303        volatile struct lance_regs *ll = lp->ll;
 304        uint leptr;
 305
 306        /* The address space as seen from the LANCE
 307         * begins at address 0. HK
 308         */
 309        leptr = 0;
 310
 311        writereg(&ll->rap, LE_CSR1);
 312        writereg(&ll->rdp, (leptr & 0xFFFF));
 313        writereg(&ll->rap, LE_CSR2);
 314        writereg(&ll->rdp, leptr >> 16);
 315        writereg(&ll->rap, LE_CSR3);
 316        writereg(&ll->rdp, lp->busmaster_regval);
 317
 318        /* Point back to csr0 */
 319        writereg(&ll->rap, LE_CSR0);
 320}
 321
 322/*
 323 * Our specialized copy routines
 324 *
 325 */
 326static void cp_to_buf(const int type, void *to, const void *from, int len)
 327{
 328        unsigned short *tp;
 329        const unsigned short *fp;
 330        unsigned short clen;
 331        unsigned char *rtp;
 332        const unsigned char *rfp;
 333
 334        if (type == PMAD_LANCE) {
 335                memcpy(to, from, len);
 336        } else if (type == PMAX_LANCE) {
 337                clen = len >> 1;
 338                tp = to;
 339                fp = from;
 340
 341                while (clen--) {
 342                        *tp++ = *fp++;
  343                        tp++;   /* every buffer halfword is word-aligned: skip the hole */
 344                }
 345
 346                clen = len & 1;
 347                rtp = tp;
 348                rfp = fp;
 349                while (clen--) {
 350                        *rtp++ = *rfp++;
 351                }
 352        } else {
 353                /*
 354                 * copy 16 Byte chunks
 355                 */
 356                clen = len >> 4;
 357                tp = to;
 358                fp = from;
 359                while (clen--) {
 360                        *tp++ = *fp++;
 361                        *tp++ = *fp++;
 362                        *tp++ = *fp++;
 363                        *tp++ = *fp++;
 364                        *tp++ = *fp++;
 365                        *tp++ = *fp++;
 366                        *tp++ = *fp++;
 367                        *tp++ = *fp++;
  368                        tp += 8;        /* skip the 16-byte gap in the destination buffer */
 369                }
 370
 371                /*
 372                 * do the rest, if any.
 373                 */
 374                clen = len & 15;
 375                rtp = (unsigned char *) tp;
 376                rfp = (unsigned char *) fp;
 377                while (clen--) {
 378                        *rtp++ = *rfp++;
 379                }
 380        }
 381
 382        iob();
 383}
 384
 385static void cp_from_buf(const int type, void *to, const void *from, int len)
 386{
 387        unsigned short *tp;
 388        const unsigned short *fp;
 389        unsigned short clen;
 390        unsigned char *rtp;
 391        const unsigned char *rfp;
 392
 393        if (type == PMAD_LANCE) {
 394                memcpy(to, from, len);
 395        } else if (type == PMAX_LANCE) {
 396                clen = len >> 1;
 397                tp = to;
 398                fp = from;
 399                while (clen--) {
 400                        *tp++ = *fp++;
  401                        fp++;   /* every buffer halfword is word-aligned: skip the hole */
 402                }
 403
 404                clen = len & 1;
 405
 406                rtp = tp;
 407                rfp = fp;
 408
 409                while (clen--) {
 410                        *rtp++ = *rfp++;
 411                }
 412        } else {
 413
 414                /*
 415                 * copy 16 Byte chunks
 416                 */
 417                clen = len >> 4;
 418                tp = to;
 419                fp = from;
 420                while (clen--) {
 421                        *tp++ = *fp++;
 422                        *tp++ = *fp++;
 423                        *tp++ = *fp++;
 424                        *tp++ = *fp++;
 425                        *tp++ = *fp++;
 426                        *tp++ = *fp++;
 427                        *tp++ = *fp++;
 428                        *tp++ = *fp++;
  429                        fp += 8;        /* skip the 16-byte gap in the source buffer */
 430                }
 431
 432                /*
 433                 * do the rest, if any.
 434                 */
 435                clen = len & 15;
 436                rtp = (unsigned char *) tp;
 437                rfp = (unsigned char *) fp;
 438                while (clen--) {
 439                        *rtp++ = *rfp++;
 440                }
 441
 442
 443        }
 444
 445}
 446
 447/* Setup the Lance Rx and Tx rings */
 448static void lance_init_ring(struct net_device *dev)
 449{
 450        struct lance_private *lp = netdev_priv(dev);
 451        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 452        uint leptr;
 453        int i;
 454
 455        /* Lock out other processes while setting up hardware */
 456        netif_stop_queue(dev);
 457        lp->rx_new = lp->tx_new = 0;
 458        lp->rx_old = lp->tx_old = 0;
 459
 460        /* Copy the ethernet address to the lance init block.
 461         * XXX bit 0 of the physical address registers has to be zero
 462         */
 463        *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
 464                                     dev->dev_addr[0];
 465        *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
 466                                     dev->dev_addr[2];
 467        *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
 468                                     dev->dev_addr[4];
 469        /* Setup the initialization block */
 470
 471        /* Setup rx descriptor pointer */
 472        leptr = offsetof(struct lance_init_block, brx_ring);
 473        *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
 474                                         (leptr >> 16);
 475        *lib_ptr(ib, rx_ptr, lp->type) = leptr;
 476        if (ZERO)
 477                printk("RX ptr: %8.8x(%8.8x)\n",
 478                       leptr, lib_off(brx_ring, lp->type));
 479
 480        /* Setup tx descriptor pointer */
 481        leptr = offsetof(struct lance_init_block, btx_ring);
 482        *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
 483                                         (leptr >> 16);
 484        *lib_ptr(ib, tx_ptr, lp->type) = leptr;
 485        if (ZERO)
 486                printk("TX ptr: %8.8x(%8.8x)\n",
 487                       leptr, lib_off(btx_ring, lp->type));
 488
 489        if (ZERO)
 490                printk("TX rings:\n");
 491
 492        /* Setup the Tx ring entries */
 493        for (i = 0; i < TX_RING_SIZE; i++) {
 494                leptr = lp->tx_buf_ptr_lnc[i];
 495                *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
 496                *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
 497                                                           0xff;
 498                *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
 499                                                /* The ones required by tmd2 */
 500                *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
 501                if (i < 3 && ZERO)
 502                        printk("%d: 0x%8.8x(0x%8.8x)\n",
 503                               i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
 504        }
 505
 506        /* Setup the Rx ring entries */
 507        if (ZERO)
 508                printk("RX rings:\n");
 509        for (i = 0; i < RX_RING_SIZE; i++) {
 510                leptr = lp->rx_buf_ptr_lnc[i];
 511                *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
 512                *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
 513                                                            0xff) |
 514                                                           LE_R1_OWN;
 515                *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
 516                                                             0xf000;
 517                *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
 518                if (i < 3 && ZERO)
 519                        printk("%d: 0x%8.8x(0x%8.8x)\n",
 520                               i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
 521        }
 522        iob();
 523}
 524
 525static int init_restart_lance(struct lance_private *lp)
 526{
 527        volatile struct lance_regs *ll = lp->ll;
 528        int i;
 529
 530        writereg(&ll->rap, LE_CSR0);
 531        writereg(&ll->rdp, LE_C0_INIT);
 532
 533        /* Wait for the lance to complete initialization */
 534        for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
 535                udelay(10);
 536        }
 537        if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
 538                printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
 539                       i, ll->rdp);
 540                return -1;
 541        }
 542        if ((ll->rdp & LE_C0_ERR)) {
 543                printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
 544                       i, ll->rdp);
 545                return -1;
 546        }
 547        writereg(&ll->rdp, LE_C0_IDON);
 548        writereg(&ll->rdp, LE_C0_STRT);
 549        writereg(&ll->rdp, LE_C0_INEA);
 550
 551        return 0;
 552}
 553
 554static int lance_rx(struct net_device *dev)
 555{
 556        struct lance_private *lp = netdev_priv(dev);
 557        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 558        volatile u16 *rd;
 559        unsigned short bits;
 560        int entry, len;
 561        struct sk_buff *skb;
 562
 563#ifdef TEST_HITS
 564        {
 565                int i;
 566
 567                printk("[");
 568                for (i = 0; i < RX_RING_SIZE; i++) {
 569                        if (i == lp->rx_new)
 570                                printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
 571                                                      lp->type) &
 572                                             LE_R1_OWN ? "_" : "X");
 573                        else
 574                                printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
 575                                                      lp->type) &
 576                                             LE_R1_OWN ? "." : "1");
 577                }
 578                printk("]");
 579        }
 580#endif
 581
 582        for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
 583             !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
 584             rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
 585                entry = lp->rx_new;
 586
  587                /* Did we get an incomplete frame? */
 588                if ((bits & LE_R1_POK) != LE_R1_POK) {
 589                        dev->stats.rx_over_errors++;
 590                        dev->stats.rx_errors++;
 591                } else if (bits & LE_R1_ERR) {
 592                        /* Count only the end frame as a rx error,
 593                         * not the beginning
 594                         */
 595                        if (bits & LE_R1_BUF)
 596                                dev->stats.rx_fifo_errors++;
 597                        if (bits & LE_R1_CRC)
 598                                dev->stats.rx_crc_errors++;
 599                        if (bits & LE_R1_OFL)
 600                                dev->stats.rx_over_errors++;
 601                        if (bits & LE_R1_FRA)
 602                                dev->stats.rx_frame_errors++;
 603                        if (bits & LE_R1_EOP)
 604                                dev->stats.rx_errors++;
 605                } else {
 606                        len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
 607                        skb = netdev_alloc_skb(dev, len + 2);
 608
 609                        if (skb == 0) {
 610                                printk("%s: Memory squeeze, deferring packet.\n",
 611                                       dev->name);
 612                                dev->stats.rx_dropped++;
 613                                *rds_ptr(rd, mblength, lp->type) = 0;
 614                                *rds_ptr(rd, rmd1, lp->type) =
 615                                        ((lp->rx_buf_ptr_lnc[entry] >> 16) &
 616                                         0xff) | LE_R1_OWN;
 617                                lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 618                                return 0;
 619                        }
 620                        dev->stats.rx_bytes += len;
 621
 622                        skb_reserve(skb, 2);    /* 16 byte align */
 623                        skb_put(skb, len);      /* make room */
 624
 625                        cp_from_buf(lp->type, skb->data,
 626                                    lp->rx_buf_ptr_cpu[entry], len);
 627
 628                        skb->protocol = eth_type_trans(skb, dev);
 629                        netif_rx(skb);
 630                        dev->stats.rx_packets++;
 631                }
 632
 633                /* Return the packet to the pool */
 634                *rds_ptr(rd, mblength, lp->type) = 0;
 635                *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
 636                *rds_ptr(rd, rmd1, lp->type) =
 637                        ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
 638                lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 639        }
 640        return 0;
 641}
 642
 643static void lance_tx(struct net_device *dev)
 644{
 645        struct lance_private *lp = netdev_priv(dev);
 646        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 647        volatile struct lance_regs *ll = lp->ll;
 648        volatile u16 *td;
 649        int i, j;
 650        int status;
 651
 652        j = lp->tx_old;
 653
 654        spin_lock(&lp->lock);
 655
 656        for (i = j; i != lp->tx_new; i = j) {
 657                td = lib_ptr(ib, btx_ring[i], lp->type);
 658                /* If we hit a packet not owned by us, stop */
 659                if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
 660                        break;
 661
 662                if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
 663                        status = *tds_ptr(td, misc, lp->type);
 664
 665                        dev->stats.tx_errors++;
 666                        if (status & LE_T3_RTY)
 667                                dev->stats.tx_aborted_errors++;
 668                        if (status & LE_T3_LCOL)
 669                                dev->stats.tx_window_errors++;
 670
 671                        if (status & LE_T3_CLOS) {
 672                                dev->stats.tx_carrier_errors++;
 673                                printk("%s: Carrier Lost\n", dev->name);
 674                                /* Stop the lance */
 675                                writereg(&ll->rap, LE_CSR0);
 676                                writereg(&ll->rdp, LE_C0_STOP);
 677                                lance_init_ring(dev);
 678                                load_csrs(lp);
 679                                init_restart_lance(lp);
 680                                goto out;
 681                        }
 682                        /* Buffer errors and underflows turn off the
 683                         * transmitter, restart the adapter.
 684                         */
 685                        if (status & (LE_T3_BUF | LE_T3_UFL)) {
 686                                dev->stats.tx_fifo_errors++;
 687
 688                                printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
 689                                       dev->name);
 690                                /* Stop the lance */
 691                                writereg(&ll->rap, LE_CSR0);
 692                                writereg(&ll->rdp, LE_C0_STOP);
 693                                lance_init_ring(dev);
 694                                load_csrs(lp);
 695                                init_restart_lance(lp);
 696                                goto out;
 697                        }
 698                } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
 699                           LE_T1_POK) {
 700                        /*
 701                         * So we don't count the packet more than once.
 702                         */
 703                        *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
 704
 705                        /* One collision before packet was sent. */
 706                        if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
 707                                dev->stats.collisions++;
 708
 709                        /* More than one collision, be optimistic. */
 710                        if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
 711                                dev->stats.collisions += 2;
 712
 713                        dev->stats.tx_packets++;
 714                }
 715                j = (j + 1) & TX_RING_MOD_MASK;
 716        }
 717        lp->tx_old = j;
 718out:
 719        if (netif_queue_stopped(dev) &&
 720            TX_BUFFS_AVAIL > 0)
 721                netif_wake_queue(dev);
 722
 723        spin_unlock(&lp->lock);
 724}
 725
 726static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 727{
 728        struct net_device *dev = dev_id;
 729
 730        printk(KERN_ERR "%s: DMA error\n", dev->name);
 731        return IRQ_HANDLED;
 732}
 733
 734static irqreturn_t lance_interrupt(int irq, void *dev_id)
 735{
 736        struct net_device *dev = dev_id;
 737        struct lance_private *lp = netdev_priv(dev);
 738        volatile struct lance_regs *ll = lp->ll;
 739        int csr0;
 740
 741        writereg(&ll->rap, LE_CSR0);
 742        csr0 = ll->rdp;
 743
 744        /* Acknowledge all the interrupt sources ASAP */
 745        writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
 746
 747        if ((csr0 & LE_C0_ERR)) {
 748                /* Clear the error condition */
 749                writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
 750                         LE_C0_CERR | LE_C0_MERR);
 751        }
 752        if (csr0 & LE_C0_RINT)
 753                lance_rx(dev);
 754
 755        if (csr0 & LE_C0_TINT)
 756                lance_tx(dev);
 757
 758        if (csr0 & LE_C0_BABL)
 759                dev->stats.tx_errors++;
 760
 761        if (csr0 & LE_C0_MISS)
 762                dev->stats.rx_errors++;
 763
 764        if (csr0 & LE_C0_MERR) {
 765                printk("%s: Memory error, status %04x\n", dev->name, csr0);
 766
 767                writereg(&ll->rdp, LE_C0_STOP);
 768
 769                lance_init_ring(dev);
 770                load_csrs(lp);
 771                init_restart_lance(lp);
 772                netif_wake_queue(dev);
 773        }
 774
 775        writereg(&ll->rdp, LE_C0_INEA);
 776        writereg(&ll->rdp, LE_C0_INEA);
 777        return IRQ_HANDLED;
 778}
 779
 780static int lance_open(struct net_device *dev)
 781{
 782        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 783        struct lance_private *lp = netdev_priv(dev);
 784        volatile struct lance_regs *ll = lp->ll;
 785        int status = 0;
 786
 787        /* Stop the Lance */
 788        writereg(&ll->rap, LE_CSR0);
 789        writereg(&ll->rdp, LE_C0_STOP);
 790
 791        /* Set mode and clear multicast filter only at device open,
 792         * so that lance_init_ring() called at any error will not
 793         * forget multicast filters.
 794         *
  795         * BTW it is a common bug in all lance drivers! --ANK
 796         */
 797        *lib_ptr(ib, mode, lp->type) = 0;
 798        *lib_ptr(ib, filter[0], lp->type) = 0;
 799        *lib_ptr(ib, filter[1], lp->type) = 0;
 800        *lib_ptr(ib, filter[2], lp->type) = 0;
 801        *lib_ptr(ib, filter[3], lp->type) = 0;
 802
 803        lance_init_ring(dev);
 804        load_csrs(lp);
 805
 806        netif_start_queue(dev);
 807
 808        /* Associate IRQ with lance_interrupt */
 809        if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
 810                printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
 811                return -EAGAIN;
 812        }
 813        if (lp->dma_irq >= 0) {
 814                unsigned long flags;
 815
 816                if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
 817                                "lance error", dev)) {
 818                        free_irq(dev->irq, dev);
 819                        printk("%s: Can't get DMA IRQ %d\n", dev->name,
 820                                lp->dma_irq);
 821                        return -EAGAIN;
 822                }
 823
 824                spin_lock_irqsave(&ioasic_ssr_lock, flags);
 825
 826                fast_mb();
 827                /* Enable I/O ASIC LANCE DMA.  */
 828                ioasic_write(IO_REG_SSR,
 829                             ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
 830
 831                fast_mb();
 832                spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
 833        }
 834
 835        status = init_restart_lance(lp);
 836        return status;
 837}
 838
 839static int lance_close(struct net_device *dev)
 840{
 841        struct lance_private *lp = netdev_priv(dev);
 842        volatile struct lance_regs *ll = lp->ll;
 843
 844        netif_stop_queue(dev);
 845        del_timer_sync(&lp->multicast_timer);
 846
 847        /* Stop the card */
 848        writereg(&ll->rap, LE_CSR0);
 849        writereg(&ll->rdp, LE_C0_STOP);
 850
 851        if (lp->dma_irq >= 0) {
 852                unsigned long flags;
 853
 854                spin_lock_irqsave(&ioasic_ssr_lock, flags);
 855
 856                fast_mb();
 857                /* Disable I/O ASIC LANCE DMA.  */
 858                ioasic_write(IO_REG_SSR,
 859                             ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
 860
 861                fast_iob();
 862                spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
 863
 864                free_irq(lp->dma_irq, dev);
 865        }
 866        free_irq(dev->irq, dev);
 867        return 0;
 868}
 869
 870static inline int lance_reset(struct net_device *dev)
 871{
 872        struct lance_private *lp = netdev_priv(dev);
 873        volatile struct lance_regs *ll = lp->ll;
 874        int status;
 875
 876        /* Stop the lance */
 877        writereg(&ll->rap, LE_CSR0);
 878        writereg(&ll->rdp, LE_C0_STOP);
 879
 880        lance_init_ring(dev);
 881        load_csrs(lp);
 882        dev->trans_start = jiffies; /* prevent tx timeout */
 883        status = init_restart_lance(lp);
 884        return status;
 885}
 886
 887static void lance_tx_timeout(struct net_device *dev)
 888{
 889        struct lance_private *lp = netdev_priv(dev);
 890        volatile struct lance_regs *ll = lp->ll;
 891
 892        printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
 893                dev->name, ll->rdp);
 894        lance_reset(dev);
 895        netif_wake_queue(dev);
 896}
 897
 898static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 899{
 900        struct lance_private *lp = netdev_priv(dev);
 901        volatile struct lance_regs *ll = lp->ll;
 902        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 903        unsigned long flags;
 904        int entry, len;
 905
 906        len = skb->len;
 907
 908        if (len < ETH_ZLEN) {
 909                if (skb_padto(skb, ETH_ZLEN))
 910                        return NETDEV_TX_OK;
 911                len = ETH_ZLEN;
 912        }
 913
 914        dev->stats.tx_bytes += len;
 915
 916        spin_lock_irqsave(&lp->lock, flags);
 917
 918        entry = lp->tx_new;
 919        *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
 920        *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
 921
 922        cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
 923
 924        /* Now, give the packet to the lance */
 925        *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
 926                ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
 927                (LE_T1_POK | LE_T1_OWN);
 928        lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
 929
 930        if (TX_BUFFS_AVAIL <= 0)
 931                netif_stop_queue(dev);
 932
 933        /* Kick the lance: transmit now */
 934        writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
 935
 936        spin_unlock_irqrestore(&lp->lock, flags);
 937
 938        dev_kfree_skb(skb);
 939
 940        return NETDEV_TX_OK;
 941}
 942
 943static void lance_load_multicast(struct net_device *dev)
 944{
 945        struct lance_private *lp = netdev_priv(dev);
 946        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 947        struct netdev_hw_addr *ha;
 948        u32 crc;
 949
 950        /* set all multicast bits */
 951        if (dev->flags & IFF_ALLMULTI) {
 952                *lib_ptr(ib, filter[0], lp->type) = 0xffff;
 953                *lib_ptr(ib, filter[1], lp->type) = 0xffff;
 954                *lib_ptr(ib, filter[2], lp->type) = 0xffff;
 955                *lib_ptr(ib, filter[3], lp->type) = 0xffff;
 956                return;
 957        }
 958        /* clear the multicast filter */
 959        *lib_ptr(ib, filter[0], lp->type) = 0;
 960        *lib_ptr(ib, filter[1], lp->type) = 0;
 961        *lib_ptr(ib, filter[2], lp->type) = 0;
 962        *lib_ptr(ib, filter[3], lp->type) = 0;
 963
 964        /* Add addresses */
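        /*
         * Illustrative note: crc >> 26 keeps the upper six bits of the
         * little-endian CRC; bits 5-4 then select one of the four 16-bit
         * filter words and bits 3-0 the bit within it, i.e. one of the 64
         * logical address filter bits.
         */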
 965        netdev_for_each_mc_addr(ha, dev) {
 966                crc = ether_crc_le(ETH_ALEN, ha->addr);
 967                crc = crc >> 26;
 968                *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
 969        }
 970}
 971
 972static void lance_set_multicast(struct net_device *dev)
 973{
 974        struct lance_private *lp = netdev_priv(dev);
 975        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 976        volatile struct lance_regs *ll = lp->ll;
 977
 978        if (!netif_running(dev))
 979                return;
 980
 981        if (lp->tx_old != lp->tx_new) {
 982                mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
 983                netif_wake_queue(dev);
 984                return;
 985        }
 986
 987        netif_stop_queue(dev);
 988
 989        writereg(&ll->rap, LE_CSR0);
 990        writereg(&ll->rdp, LE_C0_STOP);
 991
 992        lance_init_ring(dev);
 993
 994        if (dev->flags & IFF_PROMISC) {
 995                *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
 996        } else {
 997                *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
 998                lance_load_multicast(dev);
 999        }
1000        load_csrs(lp);
1001        init_restart_lance(lp);
1002        netif_wake_queue(dev);
1003}
1004
1005static void lance_set_multicast_retry(unsigned long _opaque)
1006{
1007        struct net_device *dev = (struct net_device *) _opaque;
1008
1009        lance_set_multicast(dev);
1010}
1011
1012static const struct net_device_ops lance_netdev_ops = {
1013        .ndo_open               = lance_open,
1014        .ndo_stop               = lance_close,
1015        .ndo_start_xmit         = lance_start_xmit,
1016        .ndo_tx_timeout         = lance_tx_timeout,
1017        .ndo_set_rx_mode        = lance_set_multicast,
1018        .ndo_change_mtu         = eth_change_mtu,
1019        .ndo_validate_addr      = eth_validate_addr,
1020        .ndo_set_mac_address    = eth_mac_addr,
1021};
1022
1023static int __devinit dec_lance_probe(struct device *bdev, const int type)
1024{
1025        static unsigned version_printed;
1026        static const char fmt[] = "declance%d";
1027        char name[10];
1028        struct net_device *dev;
1029        struct lance_private *lp;
1030        volatile struct lance_regs *ll;
1031        resource_size_t start = 0, len = 0;
1032        int i, ret;
1033        unsigned long esar_base;
1034        unsigned char *esar;
1035
1036        if (dec_lance_debug && version_printed++ == 0)
1037                printk(version);
1038
1039        if (bdev)
1040                snprintf(name, sizeof(name), "%s", dev_name(bdev));
1041        else {
1042                i = 0;
1043                dev = root_lance_dev;
1044                while (dev) {
1045                        i++;
1046                        lp = netdev_priv(dev);
1047                        dev = lp->next;
1048                }
1049                snprintf(name, sizeof(name), fmt, i);
1050        }
1051
1052        dev = alloc_etherdev(sizeof(struct lance_private));
1053        if (!dev) {
1054                ret = -ENOMEM;
1055                goto err_out;
1056        }
1057
1058        /*
1059         * alloc_etherdev ensures the data structures used by the LANCE
1060         * are aligned.
1061         */
1062        lp = netdev_priv(dev);
1063        spin_lock_init(&lp->lock);
1064
1065        lp->type = type;
1066        switch (type) {
1067        case ASIC_LANCE:
1068                dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
1069
1070                /* buffer space for the on-board LANCE shared memory */
1071                /*
1072                 * FIXME: ugly hack!
1073                 */
1074                dev->mem_start = CKSEG1ADDR(0x00020000);
1075                dev->mem_end = dev->mem_start + 0x00020000;
1076                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1077                esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
1078
1079                /* Workaround crash with booting KN04 2.1k from Disk */
1080                memset((void *)dev->mem_start, 0,
1081                       dev->mem_end - dev->mem_start);
1082
1083                /*
1084                 * setup the pointer arrays, this sucks [tm] :-(
1085                 */
1086                for (i = 0; i < RX_RING_SIZE; i++) {
1087                        lp->rx_buf_ptr_cpu[i] =
1088                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1089                                         2 * i * RX_BUFF_SIZE);
1090                        lp->rx_buf_ptr_lnc[i] =
1091                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1092                }
1093                for (i = 0; i < TX_RING_SIZE; i++) {
1094                        lp->tx_buf_ptr_cpu[i] =
1095                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1096                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
1097                                         2 * i * TX_BUFF_SIZE);
1098                        lp->tx_buf_ptr_lnc[i] =
1099                                (BUF_OFFSET_LNC +
1100                                 RX_RING_SIZE * RX_BUFF_SIZE +
1101                                 i * TX_BUFF_SIZE);
1102                }
1103
1104                /* Setup I/O ASIC LANCE DMA.  */
1105                lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
1106                ioasic_write(IO_REG_LANCE_DMA_P,
1107                             CPHYSADDR(dev->mem_start) << 3);
1108
1109                break;
1110#ifdef CONFIG_TC
1111        case PMAD_LANCE:
1112                dev_set_drvdata(bdev, dev);
1113
1114                start = to_tc_dev(bdev)->resource.start;
1115                len = to_tc_dev(bdev)->resource.end - start + 1;
1116                if (!request_mem_region(start, len, dev_name(bdev))) {
1117                        printk(KERN_ERR
1118                               "%s: Unable to reserve MMIO resource\n",
1119                               dev_name(bdev));
1120                        ret = -EBUSY;
1121                        goto err_out_dev;
1122                }
1123
1124                dev->mem_start = CKSEG1ADDR(start);
1125                dev->mem_end = dev->mem_start + 0x100000;
1126                dev->base_addr = dev->mem_start + 0x100000;
1127                dev->irq = to_tc_dev(bdev)->interrupt;
1128                esar_base = dev->mem_start + 0x1c0002;
1129                lp->dma_irq = -1;
1130
1131                for (i = 0; i < RX_RING_SIZE; i++) {
1132                        lp->rx_buf_ptr_cpu[i] =
1133                                (char *)(dev->mem_start + BUF_OFFSET_CPU +
1134                                         i * RX_BUFF_SIZE);
1135                        lp->rx_buf_ptr_lnc[i] =
1136                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1137                }
1138                for (i = 0; i < TX_RING_SIZE; i++) {
1139                        lp->tx_buf_ptr_cpu[i] =
1140                                (char *)(dev->mem_start + BUF_OFFSET_CPU +
1141                                         RX_RING_SIZE * RX_BUFF_SIZE +
1142                                         i * TX_BUFF_SIZE);
1143                        lp->tx_buf_ptr_lnc[i] =
1144                                (BUF_OFFSET_LNC +
1145                                 RX_RING_SIZE * RX_BUFF_SIZE +
1146                                 i * TX_BUFF_SIZE);
1147                }
1148
1149                break;
1150#endif
1151        case PMAX_LANCE:
1152                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1153                dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
1154                dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
1155                dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
1156                esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
1157                lp->dma_irq = -1;
1158
1159                /*
1160                 * setup the pointer arrays, this sucks [tm] :-(
1161                 */
1162                for (i = 0; i < RX_RING_SIZE; i++) {
1163                        lp->rx_buf_ptr_cpu[i] =
1164                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1165                                         2 * i * RX_BUFF_SIZE);
1166                        lp->rx_buf_ptr_lnc[i] =
1167                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1168                }
1169                for (i = 0; i < TX_RING_SIZE; i++) {
1170                        lp->tx_buf_ptr_cpu[i] =
1171                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1172                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
1173                                         2 * i * TX_BUFF_SIZE);
1174                        lp->tx_buf_ptr_lnc[i] =
1175                                (BUF_OFFSET_LNC +
1176                                 RX_RING_SIZE * RX_BUFF_SIZE +
1177                                 i * TX_BUFF_SIZE);
1178                }
1179
1180                break;
1181
1182        default:
1183                printk(KERN_ERR "%s: declance_init called with unknown type\n",
1184                        name);
1185                ret = -ENODEV;
1186                goto err_out_dev;
1187        }
1188
1189        ll = (struct lance_regs *) dev->base_addr;
1190        esar = (unsigned char *) esar_base;
1191
1192        /* prom checks */
1193        /* First, check for test pattern */
1194        if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
1195            esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
1196                printk(KERN_ERR
1197                        "%s: Ethernet station address prom not found!\n",
1198                        name);
1199                ret = -ENODEV;
1200                goto err_out_resource;
1201        }
1202        /* Check the prom contents */
1203        for (i = 0; i < 8; i++) {
1204                if (esar[i * 4] != esar[0x3c - i * 4] &&
1205                    esar[i * 4] != esar[0x40 + i * 4] &&
1206                    esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
1207                        printk(KERN_ERR "%s: Something is wrong with the "
1208                                "ethernet station address prom!\n", name);
1209                        ret = -ENODEV;
1210                        goto err_out_resource;
1211                }
1212        }
1213
1214        /* Copy the ethernet address to the device structure, later to the
1215         * lance initialization block so the lance gets it every time it's
1216         * (re)initialized.
1217         */
1218        switch (type) {
1219        case ASIC_LANCE:
1220                printk("%s: IOASIC onboard LANCE", name);
1221                break;
1222        case PMAD_LANCE:
1223                printk("%s: PMAD-AA", name);
1224                break;
1225        case PMAX_LANCE:
1226                printk("%s: PMAX onboard LANCE", name);
1227                break;
1228        }
1229        for (i = 0; i < 6; i++)
1230                dev->dev_addr[i] = esar[i * 4];
1231
1232        printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1233
1234        dev->netdev_ops = &lance_netdev_ops;
1235        dev->watchdog_timeo = 5*HZ;
1236
1237        /* lp->ll is the location of the registers for lance card */
1238        lp->ll = ll;
1239
1240        /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
1241         * specification.
1242         */
1243        lp->busmaster_regval = 0;
1244
1245        dev->dma = 0;
1246
1247        /* We cannot sleep if the chip is busy during a
1248         * multicast list update event, because such events
1249         * can occur from interrupts (ex. IPv6).  So we
1250         * use a timer to try again later when necessary. -DaveM
1251         */
1252        init_timer(&lp->multicast_timer);
1253        lp->multicast_timer.data = (unsigned long) dev;
1254        lp->multicast_timer.function = lance_set_multicast_retry;
1255
1256        ret = register_netdev(dev);
1257        if (ret) {
1258                printk(KERN_ERR
1259                        "%s: Unable to register netdev, aborting.\n", name);
1260                goto err_out_resource;
1261        }
1262
1263        if (!bdev) {
1264                lp->next = root_lance_dev;
1265                root_lance_dev = dev;
1266        }
1267
1268        printk("%s: registered as %s.\n", name, dev->name);
1269        return 0;
1270
1271err_out_resource:
1272        if (bdev)
1273                release_mem_region(start, len);
1274
1275err_out_dev:
1276        free_netdev(dev);
1277
1278err_out:
1279        return ret;
1280}
1281
1282static void __exit dec_lance_remove(struct device *bdev)
1283{
1284        struct net_device *dev = dev_get_drvdata(bdev);
1285        resource_size_t start, len;
1286
1287        unregister_netdev(dev);
1288        start = to_tc_dev(bdev)->resource.start;
1289        len = to_tc_dev(bdev)->resource.end - start + 1;
1290        release_mem_region(start, len);
1291        free_netdev(dev);
1292}
1293
1294/* Find all the lance cards on the system and initialize them */
1295static int __init dec_lance_platform_probe(void)
1296{
1297        int count = 0;
1298
1299        if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1300                if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1301                        if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1302                                count++;
1303                } else if (!TURBOCHANNEL) {
1304                        if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1305                                count++;
1306                }
1307        }
1308
1309        return (count > 0) ? 0 : -ENODEV;
1310}
1311
1312static void __exit dec_lance_platform_remove(void)
1313{
1314        while (root_lance_dev) {
1315                struct net_device *dev = root_lance_dev;
1316                struct lance_private *lp = netdev_priv(dev);
1317
1318                unregister_netdev(dev);
1319                root_lance_dev = lp->next;
1320                free_netdev(dev);
1321        }
1322}
1323
1324#ifdef CONFIG_TC
1325static int __devinit dec_lance_tc_probe(struct device *dev);
1326static int __exit dec_lance_tc_remove(struct device *dev);
1327
1328static const struct tc_device_id dec_lance_tc_table[] = {
1329        { "DEC     ", "PMAD-AA " },
1330        { }
1331};
1332MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1333
1334static struct tc_driver dec_lance_tc_driver = {
1335        .id_table       = dec_lance_tc_table,
1336        .driver         = {
1337                .name   = "declance",
1338                .bus    = &tc_bus_type,
1339                .probe  = dec_lance_tc_probe,
1340                .remove = __exit_p(dec_lance_tc_remove),
1341        },
1342};
1343
1344static int __devinit dec_lance_tc_probe(struct device *dev)
1345{
1346        int status = dec_lance_probe(dev, PMAD_LANCE);
1347        if (!status)
1348                get_device(dev);
1349        return status;
1350}
1351
1352static int __exit dec_lance_tc_remove(struct device *dev)
1353{
1354        put_device(dev);
1355        dec_lance_remove(dev);
1356        return 0;
1357}
1358#endif
1359
1360static int __init dec_lance_init(void)
1361{
1362        int status;
1363
1364        status = tc_register_driver(&dec_lance_tc_driver);
1365        if (!status)
1366                dec_lance_platform_probe();
1367        return status;
1368}
1369
1370static void __exit dec_lance_exit(void)
1371{
1372        dec_lance_platform_remove();
1373        tc_unregister_driver(&dec_lance_tc_driver);
1374}
1375
1376
1377module_init(dec_lance_init);
1378module_exit(dec_lance_exit);
1379