linux/drivers/net/ethernet/amd/declance.c
   1/*
   2 *    Lance ethernet driver for the MIPS processor based
   3 *      DECstation family
   4 *
   5 *
   6 *      adopted from sunlance.c by Richard van den Berg
   7 *
   8 *      Copyright (C) 2002, 2003, 2005, 2006  Maciej W. Rozycki
   9 *
  10 *      additional sources:
  11 *      - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
  12 *        Revision 1.2
  13 *
  14 *      History:
  15 *
  16 *      v0.001: The kernel accepts the code and it shows the hardware address.
  17 *
  18 *      v0.002: Removed most sparc stuff, left only some module and dma stuff.
  19 *
  20 *      v0.003: Enhanced base address calculation from proposals by
  21 *              Harald Koerfgen and Thomas Riemer.
  22 *
  23 *      v0.004: lance-regs is pointing at the right addresses, added prom
  24 *              check. First start of address mapping and DMA.
  25 *
  26 *      v0.005: started to play around with LANCE-DMA. This driver will not
  27 *              work for non IOASIC lances. HK
  28 *
  29 *      v0.006: added pointer arrays to lance_private and setup routine for
  30 *              them in dec_lance_init. HK
  31 *
  32 *      v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
  33 *              access the init block. This looks like one (short) word at a
  34 *              time, but the smallest amount the IOASIC can transfer is a
  35 *              (long) word. So we have a 2-2 padding here. Changed
  36 *              lance_init_block accordingly. The 16-16 padding for the buffers
  37 *              seems to be correct. HK
  38 *
  39 *      v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
  40 *
  41 *      v0.009: Module support fixes, multiple interfaces support, various
  42 *              bits. macro
  43 *
  44 *      v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
  45 *              PMAX requirement to only use halfword accesses to the
  46 *              buffer. macro
  47 *
  48 *      v0.011: Converted the PMAD to the driver model. macro
  49 */
  50
  51#include <linux/crc32.h>
  52#include <linux/delay.h>
  53#include <linux/errno.h>
  54#include <linux/if_ether.h>
  55#include <linux/init.h>
  56#include <linux/kernel.h>
  57#include <linux/module.h>
  58#include <linux/netdevice.h>
  59#include <linux/etherdevice.h>
  60#include <linux/spinlock.h>
  61#include <linux/stddef.h>
  62#include <linux/string.h>
  63#include <linux/tc.h>
  64#include <linux/types.h>
  65
  66#include <asm/addrspace.h>
  67
  68#include <asm/dec/interrupts.h>
  69#include <asm/dec/ioasic.h>
  70#include <asm/dec/ioasic_addrs.h>
  71#include <asm/dec/kn01.h>
  72#include <asm/dec/machtype.h>
  73#include <asm/dec/system.h>
  74
  75static const char version[] =
  76"declance.c: v0.011 by Linux MIPS DECstation task force\n";
  77
  78MODULE_AUTHOR("Linux MIPS DECstation task force");
  79MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
  80MODULE_LICENSE("GPL");
  81
  82#define __unused __attribute__ ((unused))
  83
  84/*
  85 * card types
  86 */
  87#define ASIC_LANCE 1
  88#define PMAD_LANCE 2
  89#define PMAX_LANCE 3
  90
  91
  92#define LE_CSR0 0
  93#define LE_CSR1 1
  94#define LE_CSR2 2
  95#define LE_CSR3 3
  96
  97#define LE_MO_PROM      0x8000  /* Enable promiscuous mode */
  98
  99#define LE_C0_ERR       0x8000  /* Error: set if BAB, SQE, MISS or ME is set */
 100#define LE_C0_BABL      0x4000  /* BAB:  Babble: tx timeout. */
 101#define LE_C0_CERR      0x2000  /* SQE:  Signal quality error */
 102#define LE_C0_MISS      0x1000  /* MISS: Missed a packet */
 103#define LE_C0_MERR      0x0800  /* ME:   Memory error */
 104#define LE_C0_RINT      0x0400  /* Received interrupt */
 105#define LE_C0_TINT      0x0200  /* Transmitter Interrupt */
 106#define LE_C0_IDON      0x0100  /* IFIN: Init finished. */
 107#define LE_C0_INTR      0x0080  /* Interrupt or error */
 108#define LE_C0_INEA      0x0040  /* Interrupt enable */
 109#define LE_C0_RXON      0x0020  /* Receiver on */
 110#define LE_C0_TXON      0x0010  /* Transmitter on */
 111#define LE_C0_TDMD      0x0008  /* Transmitter demand */
 112#define LE_C0_STOP      0x0004  /* Stop the card */
 113#define LE_C0_STRT      0x0002  /* Start the card */
 114#define LE_C0_INIT      0x0001  /* Init the card */
 115
 116#define LE_C3_BSWP      0x4     /* SWAP */
 117#define LE_C3_ACON      0x2     /* ALE Control */
 118#define LE_C3_BCON      0x1     /* Byte control */
 119
 120/* Receive message descriptor 1 */
 121#define LE_R1_OWN       0x8000  /* Who owns the entry */
 122#define LE_R1_ERR       0x4000  /* Error: if FRA, OFL, CRC or BUF is set */
 123#define LE_R1_FRA       0x2000  /* FRA: Frame error */
 124#define LE_R1_OFL       0x1000  /* OFL: Frame overflow */
 125#define LE_R1_CRC       0x0800  /* CRC error */
 126#define LE_R1_BUF       0x0400  /* BUF: Buffer error */
 127#define LE_R1_SOP       0x0200  /* Start of packet */
 128#define LE_R1_EOP       0x0100  /* End of packet */
 129#define LE_R1_POK       0x0300  /* Packet is complete: SOP + EOP */
 130
 131/* Transmit message descriptor 1 */
 132#define LE_T1_OWN       0x8000  /* Lance owns the packet */
 133#define LE_T1_ERR       0x4000  /* Error summary */
 134#define LE_T1_EMORE     0x1000  /* Error: more than one retry needed */
 135#define LE_T1_EONE      0x0800  /* Error: one retry needed */
 136#define LE_T1_EDEF      0x0400  /* Error: deferred */
 137#define LE_T1_SOP       0x0200  /* Start of packet */
 138#define LE_T1_EOP       0x0100  /* End of packet */
 139#define LE_T1_POK       0x0300  /* Packet is complete: SOP + EOP */
 140
 141#define LE_T3_BUF       0x8000  /* Buffer error */
 142#define LE_T3_UFL       0x4000  /* Error underflow */
 143#define LE_T3_LCOL      0x1000  /* Error late collision */
 144#define LE_T3_CLOS      0x0800  /* Error carrier loss */
 145#define LE_T3_RTY       0x0400  /* Error retry */
 146#define LE_T3_TDR       0x03ff  /* Time Domain Reflectometry counter */
 147
 148/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
 149
 150#ifndef LANCE_LOG_TX_BUFFERS
 151#define LANCE_LOG_TX_BUFFERS 4
 152#define LANCE_LOG_RX_BUFFERS 4
 153#endif
 154
 155#define TX_RING_SIZE                    (1 << (LANCE_LOG_TX_BUFFERS))
 156#define TX_RING_MOD_MASK                (TX_RING_SIZE - 1)
 157
 158#define RX_RING_SIZE                    (1 << (LANCE_LOG_RX_BUFFERS))
 159#define RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 160
 161#define PKT_BUF_SZ              1536
 162#define RX_BUFF_SIZE            PKT_BUF_SZ
 163#define TX_BUFF_SIZE            PKT_BUF_SZ
 164
 165#undef TEST_HITS
 166#define ZERO 0
 167
 168/*
 169 * The DS2100/3100 have a linear 64 kB buffer which supports halfword
 170 * accesses only.  Each halfword of the buffer is word-aligned in the
 171 * CPU address space.
 172 *
 173 * The PMAD-AA has a 128 kB buffer on-board.
 174 *
 175 * The IOASIC LANCE devices use a shared memory region.  This region
  176 * as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
  177 * boundary.  The LANCE sees this as a 64 kB long contiguous memory
 178 * region.
 179 *
 180 * The LANCE's DMA address is used as an index in this buffer and DMA
 181 * takes place in bursts of eight 16-bit words which are packed into
 182 * four 32-bit words by the IOASIC.  This leads to a strange padding:
 183 * 16 bytes of valid data followed by a 16 byte gap :-(.
 184 */
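     /*
      * For example, LANCE (DMA) offset 0x10 of the buffer area lives at
      * CPU offset 0x20 from the start of the region.  The shift_off()
      * based macros below hide this remapping for the init block and the
      * descriptors, and cp_to_buf()/cp_from_buf() do the same for the
      * packet buffers.
      */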
 185
 186struct lance_rx_desc {
 187        unsigned short rmd0;            /* low address of packet */
 188        unsigned short rmd1;            /* high address of packet
 189                                           and descriptor bits */
 190        short length;                   /* 2s complement (negative!)
 191                                           of buffer length */
 192        unsigned short mblength;        /* actual number of bytes received */
 193};
 194
 195struct lance_tx_desc {
 196        unsigned short tmd0;            /* low address of packet */
 197        unsigned short tmd1;            /* high address of packet
 198                                           and descriptor bits */
 199        short length;                   /* 2s complement (negative!)
 200                                           of buffer length */
 201        unsigned short misc;
 202};
 203
 204
 205/* First part of the LANCE initialization block, described in databook. */
 206struct lance_init_block {
 207        unsigned short mode;            /* pre-set mode (reg. 15) */
 208
 209        unsigned short phys_addr[3];    /* physical ethernet address */
 210        unsigned short filter[4];       /* multicast filter */
 211
 212        /* Receive and transmit ring base, along with extra bits. */
 213        unsigned short rx_ptr;          /* receive descriptor addr */
 214        unsigned short rx_len;          /* receive len and high addr */
 215        unsigned short tx_ptr;          /* transmit descriptor addr */
 216        unsigned short tx_len;          /* transmit len and high addr */
 217
 218        short gap[4];
 219
 220        /* The buffer descriptors */
 221        struct lance_rx_desc brx_ring[RX_RING_SIZE];
 222        struct lance_tx_desc btx_ring[TX_RING_SIZE];
 223};
 224
 225#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
 226#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
 227
 228#define shift_off(off, type)                                            \
 229        (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
 230
 231#define lib_off(rt, type)                                               \
 232        shift_off(offsetof(struct lance_init_block, rt), type)
 233
 234#define lib_ptr(ib, rt, type)                                           \
 235        ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
 236
 237#define rds_off(rt, type)                                               \
 238        shift_off(offsetof(struct lance_rx_desc, rt), type)
 239
 240#define rds_ptr(rd, rt, type)                                           \
 241        ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
 242
 243#define tds_off(rt, type)                                               \
 244        shift_off(offsetof(struct lance_tx_desc, rt), type)
 245
 246#define tds_ptr(td, rt, type)                                           \
 247        ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
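     /*
      * Example: offsetof(struct lance_init_block, rx_ptr) is 0x10, so
      * lib_off(rx_ptr, type) yields 0x20 on ASIC_LANCE and PMAX_LANCE
      * (each 16-bit LANCE word occupies 32 bits of CPU address space
      * there) and 0x10 unchanged on PMAD_LANCE.
      */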
 248
 249struct lance_private {
 250        struct net_device *next;
 251        int type;
 252        int dma_irq;
 253        volatile struct lance_regs *ll;
 254
 255        spinlock_t      lock;
 256
 257        int rx_new, tx_new;
 258        int rx_old, tx_old;
 259
 260        unsigned short busmaster_regval;
 261
 262        struct timer_list       multicast_timer;
 263        struct net_device       *dev;
 264
 265        /* Pointers to the ring buffers as seen from the CPU */
 266        char *rx_buf_ptr_cpu[RX_RING_SIZE];
 267        char *tx_buf_ptr_cpu[TX_RING_SIZE];
 268
 269        /* Pointers to the ring buffers as seen from the LANCE */
 270        uint rx_buf_ptr_lnc[RX_RING_SIZE];
 271        uint tx_buf_ptr_lnc[TX_RING_SIZE];
 272};
 273
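     /* Number of free transmit descriptors.  One slot is always left
      * unused so that a full ring can be told apart from an empty one
      * (tx_old == tx_new means empty).
      */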
  274#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ?               \
  275                        lp->tx_old + TX_RING_MOD_MASK - lp->tx_new :\
  276                        lp->tx_old - lp->tx_new - 1)
 277
 278/* The lance control ports are at an absolute address, machine and tc-slot
 279 * dependent.
  280 * DECstations only do 32-bit accesses while the LANCE has 16-bit register
  281 * ports, so the structure needs a padding member to make rap point
  282 * at the right address.
 283 */
 284struct lance_regs {
 285        volatile unsigned short rdp;    /* register data port */
 286        unsigned short pad;
 287        volatile unsigned short rap;    /* register address port */
 288};
 289
 290int dec_lance_debug = 2;
 291
 292static struct tc_driver dec_lance_tc_driver;
 293static struct net_device *root_lance_dev;
 294
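     /* Write a LANCE register and make sure the write has reached the
      * chip (iob()) before any subsequent access.
      */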
 295static inline void writereg(volatile unsigned short *regptr, short value)
 296{
 297        *regptr = value;
 298        iob();
 299}
 300
 301/* Load the CSR registers */
 302static void load_csrs(struct lance_private *lp)
 303{
 304        volatile struct lance_regs *ll = lp->ll;
 305        uint leptr;
 306
 307        /* The address space as seen from the LANCE
 308         * begins at address 0. HK
 309         */
 310        leptr = 0;
 311
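             /* CSR1 and CSR2 take the low and high halves of the init
              * block address as seen by the LANCE; CSR3 carries the bus
              * master configuration (busmaster_regval).
              */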
 312        writereg(&ll->rap, LE_CSR1);
 313        writereg(&ll->rdp, (leptr & 0xFFFF));
 314        writereg(&ll->rap, LE_CSR2);
 315        writereg(&ll->rdp, leptr >> 16);
 316        writereg(&ll->rap, LE_CSR3);
 317        writereg(&ll->rdp, lp->busmaster_regval);
 318
 319        /* Point back to csr0 */
 320        writereg(&ll->rap, LE_CSR0);
 321}
 322
 323/*
 324 * Our specialized copy routines
 325 *
 326 */
 327static void cp_to_buf(const int type, void *to, const void *from, int len)
 328{
 329        unsigned short *tp;
 330        const unsigned short *fp;
 331        unsigned short clen;
 332        unsigned char *rtp;
 333        const unsigned char *rfp;
 334
 335        if (type == PMAD_LANCE) {
 336                memcpy(to, from, len);
 337        } else if (type == PMAX_LANCE) {
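                     /* The PMAX buffer accepts halfword stores only and
                      * each halfword is word-aligned in CPU space, so
                      * skip a halfword slot after every store.
                      */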
 338                clen = len >> 1;
 339                tp = to;
 340                fp = from;
 341
 342                while (clen--) {
 343                        *tp++ = *fp++;
 344                        tp++;
 345                }
 346
 347                clen = len & 1;
 348                rtp = (unsigned char *)tp;
 349                rfp = (const unsigned char *)fp;
 350                while (clen--) {
 351                        *rtp++ = *rfp++;
 352                }
 353        } else {
  354                /* Copy 16-byte chunks, then skip the 16-byte gap
  355                 * the IOASIC leaves after each eight-halfword burst.
  356                 */
 357                clen = len >> 4;
 358                tp = to;
 359                fp = from;
 360                while (clen--) {
 361                        *tp++ = *fp++;
 362                        *tp++ = *fp++;
 363                        *tp++ = *fp++;
 364                        *tp++ = *fp++;
 365                        *tp++ = *fp++;
 366                        *tp++ = *fp++;
 367                        *tp++ = *fp++;
 368                        *tp++ = *fp++;
 369                        tp += 8;
 370                }
 371
 372                /*
 373                 * do the rest, if any.
 374                 */
 375                clen = len & 15;
 376                rtp = (unsigned char *)tp;
 377                rfp = (const unsigned char *)fp;
 378                while (clen--) {
 379                        *rtp++ = *rfp++;
 380                }
 381        }
 382
 383        iob();
 384}
 385
 386static void cp_from_buf(const int type, void *to, const void *from, int len)
 387{
 388        unsigned short *tp;
 389        const unsigned short *fp;
 390        unsigned short clen;
 391        unsigned char *rtp;
 392        const unsigned char *rfp;
 393
 394        if (type == PMAD_LANCE) {
 395                memcpy(to, from, len);
 396        } else if (type == PMAX_LANCE) {
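                     /* Read every other halfword; on the PMAX the
                      * intervening slots carry no data.
                      */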
 397                clen = len >> 1;
 398                tp = to;
 399                fp = from;
 400                while (clen--) {
 401                        *tp++ = *fp++;
 402                        fp++;
 403                }
 404
 405                clen = len & 1;
 406
 407                rtp = (unsigned char *)tp;
 408                rfp = (const unsigned char *)fp;
 409
 410                while (clen--) {
 411                        *rtp++ = *rfp++;
 412                }
 413        } else {
 414
  415                /* Copy 16-byte chunks of data, skipping the 16-byte
  416                 * gap that follows each chunk in the LANCE buffer.
  417                 */
 418                clen = len >> 4;
 419                tp = to;
 420                fp = from;
 421                while (clen--) {
 422                        *tp++ = *fp++;
 423                        *tp++ = *fp++;
 424                        *tp++ = *fp++;
 425                        *tp++ = *fp++;
 426                        *tp++ = *fp++;
 427                        *tp++ = *fp++;
 428                        *tp++ = *fp++;
 429                        *tp++ = *fp++;
 430                        fp += 8;
 431                }
 432
 433                /*
 434                 * do the rest, if any.
 435                 */
 436                clen = len & 15;
 437                rtp = (unsigned char *)tp;
 438                rfp = (const unsigned char *)fp;
 439                while (clen--) {
 440                        *rtp++ = *rfp++;
 441                }
 442
 443
 444        }
 445
 446}
 447
 448/* Setup the Lance Rx and Tx rings */
 449static void lance_init_ring(struct net_device *dev)
 450{
 451        struct lance_private *lp = netdev_priv(dev);
 452        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 453        uint leptr;
 454        int i;
 455
 456        /* Lock out other processes while setting up hardware */
 457        netif_stop_queue(dev);
 458        lp->rx_new = lp->tx_new = 0;
 459        lp->rx_old = lp->tx_old = 0;
 460
 461        /* Copy the ethernet address to the lance init block.
 462         * XXX bit 0 of the physical address registers has to be zero
 463         */
 464        *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
 465                                     dev->dev_addr[0];
 466        *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
 467                                     dev->dev_addr[2];
 468        *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
 469                                     dev->dev_addr[4];
 470        /* Setup the initialization block */
 471
 472        /* Setup rx descriptor pointer */
 473        leptr = offsetof(struct lance_init_block, brx_ring);
 474        *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
 475                                         (leptr >> 16);
 476        *lib_ptr(ib, rx_ptr, lp->type) = leptr;
 477        if (ZERO)
 478                printk("RX ptr: %8.8x(%8.8x)\n",
 479                       leptr, (uint)lib_off(brx_ring, lp->type));
 480
 481        /* Setup tx descriptor pointer */
 482        leptr = offsetof(struct lance_init_block, btx_ring);
 483        *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
 484                                         (leptr >> 16);
 485        *lib_ptr(ib, tx_ptr, lp->type) = leptr;
 486        if (ZERO)
 487                printk("TX ptr: %8.8x(%8.8x)\n",
 488                       leptr, (uint)lib_off(btx_ring, lp->type));
 489
 490        if (ZERO)
 491                printk("TX rings:\n");
 492
 493        /* Setup the Tx ring entries */
 494        for (i = 0; i < TX_RING_SIZE; i++) {
 495                leptr = lp->tx_buf_ptr_lnc[i];
 496                *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
 497                *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
 498                                                           0xff;
 499                *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
 500                                                /* The ones required by tmd2 */
 501                *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
 502                if (i < 3 && ZERO)
 503                        printk("%d: %8.8x(%p)\n",
 504                               i, leptr, lp->tx_buf_ptr_cpu[i]);
 505        }
 506
 507        /* Setup the Rx ring entries */
 508        if (ZERO)
 509                printk("RX rings:\n");
 510        for (i = 0; i < RX_RING_SIZE; i++) {
 511                leptr = lp->rx_buf_ptr_lnc[i];
 512                *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
 513                *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
 514                                                            0xff) |
 515                                                           LE_R1_OWN;
 516                *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
 517                                                             0xf000;
 518                *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
 519                if (i < 3 && ZERO)
 520                        printk("%d: %8.8x(%p)\n",
 521                               i, leptr, lp->rx_buf_ptr_cpu[i]);
 522        }
 523        iob();
 524}
 525
 526static int init_restart_lance(struct lance_private *lp)
 527{
 528        volatile struct lance_regs *ll = lp->ll;
 529        int i;
 530
 531        writereg(&ll->rap, LE_CSR0);
 532        writereg(&ll->rdp, LE_C0_INIT);
 533
 534        /* Wait for the lance to complete initialization */
 535        for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
 536                udelay(10);
 537        }
 538        if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
 539                printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
 540                       i, ll->rdp);
 541                return -1;
 542        }
 543        if ((ll->rdp & LE_C0_ERR)) {
  544                printk("LANCE unopened with ERR bit set, csr0=%4.4x.\n",
  545                       ll->rdp);
 546                return -1;
 547        }
 548        writereg(&ll->rdp, LE_C0_IDON);
 549        writereg(&ll->rdp, LE_C0_STRT);
 550        writereg(&ll->rdp, LE_C0_INEA);
 551
 552        return 0;
 553}
 554
 555static int lance_rx(struct net_device *dev)
 556{
 557        struct lance_private *lp = netdev_priv(dev);
 558        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 559        volatile u16 *rd;
 560        unsigned short bits;
 561        int entry, len;
 562        struct sk_buff *skb;
 563
 564#ifdef TEST_HITS
 565        {
 566                int i;
 567
 568                printk("[");
 569                for (i = 0; i < RX_RING_SIZE; i++) {
 570                        if (i == lp->rx_new)
 571                                printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
 572                                                      lp->type) &
 573                                             LE_R1_OWN ? "_" : "X");
 574                        else
 575                                printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
 576                                                      lp->type) &
 577                                             LE_R1_OWN ? "." : "1");
 578                }
 579                printk("]");
 580        }
 581#endif
 582
 583        for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
 584             !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
 585             rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
 586                entry = lp->rx_new;
 587
 588                /* We got an incomplete frame? */
 589                if ((bits & LE_R1_POK) != LE_R1_POK) {
 590                        dev->stats.rx_over_errors++;
 591                        dev->stats.rx_errors++;
 592                } else if (bits & LE_R1_ERR) {
 593                        /* Count only the end frame as a rx error,
 594                         * not the beginning
 595                         */
 596                        if (bits & LE_R1_BUF)
 597                                dev->stats.rx_fifo_errors++;
 598                        if (bits & LE_R1_CRC)
 599                                dev->stats.rx_crc_errors++;
 600                        if (bits & LE_R1_OFL)
 601                                dev->stats.rx_over_errors++;
 602                        if (bits & LE_R1_FRA)
 603                                dev->stats.rx_frame_errors++;
 604                        if (bits & LE_R1_EOP)
 605                                dev->stats.rx_errors++;
 606                } else {
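                             /* mblength includes the 4-byte FCS
                              * appended by the LANCE; strip it.
                              */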
 607                        len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
 608                        skb = netdev_alloc_skb(dev, len + 2);
 609
  610                        if (!skb) {
 611                                dev->stats.rx_dropped++;
 612                                *rds_ptr(rd, mblength, lp->type) = 0;
 613                                *rds_ptr(rd, rmd1, lp->type) =
 614                                        ((lp->rx_buf_ptr_lnc[entry] >> 16) &
 615                                         0xff) | LE_R1_OWN;
 616                                lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 617                                return 0;
 618                        }
 619                        dev->stats.rx_bytes += len;
 620
 621                        skb_reserve(skb, 2);    /* 16 byte align */
 622                        skb_put(skb, len);      /* make room */
 623
 624                        cp_from_buf(lp->type, skb->data,
 625                                    lp->rx_buf_ptr_cpu[entry], len);
 626
 627                        skb->protocol = eth_type_trans(skb, dev);
 628                        netif_rx(skb);
 629                        dev->stats.rx_packets++;
 630                }
 631
 632                /* Return the packet to the pool */
 633                *rds_ptr(rd, mblength, lp->type) = 0;
 634                *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
 635                *rds_ptr(rd, rmd1, lp->type) =
 636                        ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
 637                lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 638        }
 639        return 0;
 640}
 641
 642static void lance_tx(struct net_device *dev)
 643{
 644        struct lance_private *lp = netdev_priv(dev);
 645        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 646        volatile struct lance_regs *ll = lp->ll;
 647        volatile u16 *td;
 648        int i, j;
 649        int status;
 650
 651        j = lp->tx_old;
 652
 653        spin_lock(&lp->lock);
 654
 655        for (i = j; i != lp->tx_new; i = j) {
 656                td = lib_ptr(ib, btx_ring[i], lp->type);
 657                /* If we hit a packet not owned by us, stop */
 658                if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
 659                        break;
 660
 661                if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
 662                        status = *tds_ptr(td, misc, lp->type);
 663
 664                        dev->stats.tx_errors++;
 665                        if (status & LE_T3_RTY)
 666                                dev->stats.tx_aborted_errors++;
 667                        if (status & LE_T3_LCOL)
 668                                dev->stats.tx_window_errors++;
 669
 670                        if (status & LE_T3_CLOS) {
 671                                dev->stats.tx_carrier_errors++;
 672                                printk("%s: Carrier Lost\n", dev->name);
 673                                /* Stop the lance */
 674                                writereg(&ll->rap, LE_CSR0);
 675                                writereg(&ll->rdp, LE_C0_STOP);
 676                                lance_init_ring(dev);
 677                                load_csrs(lp);
 678                                init_restart_lance(lp);
 679                                goto out;
 680                        }
 681                        /* Buffer errors and underflows turn off the
 682                         * transmitter, restart the adapter.
 683                         */
 684                        if (status & (LE_T3_BUF | LE_T3_UFL)) {
 685                                dev->stats.tx_fifo_errors++;
 686
 687                                printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
 688                                       dev->name);
 689                                /* Stop the lance */
 690                                writereg(&ll->rap, LE_CSR0);
 691                                writereg(&ll->rdp, LE_C0_STOP);
 692                                lance_init_ring(dev);
 693                                load_csrs(lp);
 694                                init_restart_lance(lp);
 695                                goto out;
 696                        }
 697                } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
 698                           LE_T1_POK) {
 699                        /*
 700                         * So we don't count the packet more than once.
 701                         */
 702                        *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
 703
 704                        /* One collision before packet was sent. */
 705                        if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
 706                                dev->stats.collisions++;
 707
 708                        /* More than one collision, be optimistic. */
 709                        if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
 710                                dev->stats.collisions += 2;
 711
 712                        dev->stats.tx_packets++;
 713                }
 714                j = (j + 1) & TX_RING_MOD_MASK;
 715        }
 716        lp->tx_old = j;
 717out:
 718        if (netif_queue_stopped(dev) &&
 719            TX_BUFFS_AVAIL > 0)
 720                netif_wake_queue(dev);
 721
 722        spin_unlock(&lp->lock);
 723}
 724
 725static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 726{
 727        struct net_device *dev = dev_id;
 728
 729        printk(KERN_ERR "%s: DMA error\n", dev->name);
 730        return IRQ_HANDLED;
 731}
 732
 733static irqreturn_t lance_interrupt(int irq, void *dev_id)
 734{
 735        struct net_device *dev = dev_id;
 736        struct lance_private *lp = netdev_priv(dev);
 737        volatile struct lance_regs *ll = lp->ll;
 738        int csr0;
 739
 740        writereg(&ll->rap, LE_CSR0);
 741        csr0 = ll->rdp;
 742
 743        /* Acknowledge all the interrupt sources ASAP */
 744        writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
 745
 746        if ((csr0 & LE_C0_ERR)) {
 747                /* Clear the error condition */
 748                writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
 749                         LE_C0_CERR | LE_C0_MERR);
 750        }
 751        if (csr0 & LE_C0_RINT)
 752                lance_rx(dev);
 753
 754        if (csr0 & LE_C0_TINT)
 755                lance_tx(dev);
 756
 757        if (csr0 & LE_C0_BABL)
 758                dev->stats.tx_errors++;
 759
 760        if (csr0 & LE_C0_MISS)
 761                dev->stats.rx_errors++;
 762
 763        if (csr0 & LE_C0_MERR) {
 764                printk("%s: Memory error, status %04x\n", dev->name, csr0);
 765
 766                writereg(&ll->rdp, LE_C0_STOP);
 767
 768                lance_init_ring(dev);
 769                load_csrs(lp);
 770                init_restart_lance(lp);
 771                netif_wake_queue(dev);
 772        }
 773
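             /* Finally, re-enable chip interrupts. */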
 774        writereg(&ll->rdp, LE_C0_INEA);
 775        writereg(&ll->rdp, LE_C0_INEA);
 776        return IRQ_HANDLED;
 777}
 778
 779static int lance_open(struct net_device *dev)
 780{
 781        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 782        struct lance_private *lp = netdev_priv(dev);
 783        volatile struct lance_regs *ll = lp->ll;
 784        int status = 0;
 785
 786        /* Stop the Lance */
 787        writereg(&ll->rap, LE_CSR0);
 788        writereg(&ll->rdp, LE_C0_STOP);
 789
 790        /* Set mode and clear multicast filter only at device open,
 791         * so that lance_init_ring() called at any error will not
 792         * forget multicast filters.
 793         *
  794         * BTW it is a common bug in all lance drivers! --ANK
 795         */
 796        *lib_ptr(ib, mode, lp->type) = 0;
 797        *lib_ptr(ib, filter[0], lp->type) = 0;
 798        *lib_ptr(ib, filter[1], lp->type) = 0;
 799        *lib_ptr(ib, filter[2], lp->type) = 0;
 800        *lib_ptr(ib, filter[3], lp->type) = 0;
 801
 802        lance_init_ring(dev);
 803        load_csrs(lp);
 804
 805        netif_start_queue(dev);
 806
 807        /* Associate IRQ with lance_interrupt */
 808        if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
 809                printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
 810                return -EAGAIN;
 811        }
 812        if (lp->dma_irq >= 0) {
 813                unsigned long flags;
 814
 815                if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
 816                                "lance error", dev)) {
 817                        free_irq(dev->irq, dev);
 818                        printk("%s: Can't get DMA IRQ %d\n", dev->name,
 819                                lp->dma_irq);
 820                        return -EAGAIN;
 821                }
 822
 823                spin_lock_irqsave(&ioasic_ssr_lock, flags);
 824
 825                fast_mb();
 826                /* Enable I/O ASIC LANCE DMA.  */
 827                ioasic_write(IO_REG_SSR,
 828                             ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
 829
 830                fast_mb();
 831                spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
 832        }
 833
 834        status = init_restart_lance(lp);
 835        return status;
 836}
 837
 838static int lance_close(struct net_device *dev)
 839{
 840        struct lance_private *lp = netdev_priv(dev);
 841        volatile struct lance_regs *ll = lp->ll;
 842
 843        netif_stop_queue(dev);
 844        del_timer_sync(&lp->multicast_timer);
 845
 846        /* Stop the card */
 847        writereg(&ll->rap, LE_CSR0);
 848        writereg(&ll->rdp, LE_C0_STOP);
 849
 850        if (lp->dma_irq >= 0) {
 851                unsigned long flags;
 852
 853                spin_lock_irqsave(&ioasic_ssr_lock, flags);
 854
 855                fast_mb();
 856                /* Disable I/O ASIC LANCE DMA.  */
 857                ioasic_write(IO_REG_SSR,
 858                             ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
 859
 860                fast_iob();
 861                spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
 862
 863                free_irq(lp->dma_irq, dev);
 864        }
 865        free_irq(dev->irq, dev);
 866        return 0;
 867}
 868
 869static inline int lance_reset(struct net_device *dev)
 870{
 871        struct lance_private *lp = netdev_priv(dev);
 872        volatile struct lance_regs *ll = lp->ll;
 873        int status;
 874
 875        /* Stop the lance */
 876        writereg(&ll->rap, LE_CSR0);
 877        writereg(&ll->rdp, LE_C0_STOP);
 878
 879        lance_init_ring(dev);
 880        load_csrs(lp);
 881        netif_trans_update(dev); /* prevent tx timeout */
 882        status = init_restart_lance(lp);
 883        return status;
 884}
 885
 886static void lance_tx_timeout(struct net_device *dev)
 887{
 888        struct lance_private *lp = netdev_priv(dev);
 889        volatile struct lance_regs *ll = lp->ll;
 890
 891        printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
 892                dev->name, ll->rdp);
 893        lance_reset(dev);
 894        netif_wake_queue(dev);
 895}
 896
 897static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 898{
 899        struct lance_private *lp = netdev_priv(dev);
 900        volatile struct lance_regs *ll = lp->ll;
 901        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 902        unsigned long flags;
 903        int entry, len;
 904
 905        len = skb->len;
 906
 907        if (len < ETH_ZLEN) {
 908                if (skb_padto(skb, ETH_ZLEN))
 909                        return NETDEV_TX_OK;
 910                len = ETH_ZLEN;
 911        }
 912
 913        dev->stats.tx_bytes += len;
 914
 915        spin_lock_irqsave(&lp->lock, flags);
 916
 917        entry = lp->tx_new;
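             /* The LANCE takes the buffer byte count as a negative
              * (two's complement) value.
              */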
 918        *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
 919        *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
 920
 921        cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
 922
 923        /* Now, give the packet to the lance */
 924        *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
 925                ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
 926                (LE_T1_POK | LE_T1_OWN);
 927        lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
 928
 929        if (TX_BUFFS_AVAIL <= 0)
 930                netif_stop_queue(dev);
 931
 932        /* Kick the lance: transmit now */
 933        writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
 934
 935        spin_unlock_irqrestore(&lp->lock, flags);
 936
 937        dev_kfree_skb(skb);
 938
 939        return NETDEV_TX_OK;
 940}
 941
 942static void lance_load_multicast(struct net_device *dev)
 943{
 944        struct lance_private *lp = netdev_priv(dev);
 945        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 946        struct netdev_hw_addr *ha;
 947        u32 crc;
 948
 949        /* set all multicast bits */
 950        if (dev->flags & IFF_ALLMULTI) {
 951                *lib_ptr(ib, filter[0], lp->type) = 0xffff;
 952                *lib_ptr(ib, filter[1], lp->type) = 0xffff;
 953                *lib_ptr(ib, filter[2], lp->type) = 0xffff;
 954                *lib_ptr(ib, filter[3], lp->type) = 0xffff;
 955                return;
 956        }
 957        /* clear the multicast filter */
 958        *lib_ptr(ib, filter[0], lp->type) = 0;
 959        *lib_ptr(ib, filter[1], lp->type) = 0;
 960        *lib_ptr(ib, filter[2], lp->type) = 0;
 961        *lib_ptr(ib, filter[3], lp->type) = 0;
 962
 963        /* Add addresses */
 964        netdev_for_each_mc_addr(ha, dev) {
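                     /* The top six bits of the little-endian CRC select
                      * one of 64 filter bits: two bits pick the 16-bit
                      * word, four bits the bit within it.
                      */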
 965                crc = ether_crc_le(ETH_ALEN, ha->addr);
 966                crc = crc >> 26;
 967                *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
 968        }
 969}
 970
 971static void lance_set_multicast(struct net_device *dev)
 972{
 973        struct lance_private *lp = netdev_priv(dev);
 974        volatile u16 *ib = (volatile u16 *)dev->mem_start;
 975        volatile struct lance_regs *ll = lp->ll;
 976
 977        if (!netif_running(dev))
 978                return;
 979
 980        if (lp->tx_old != lp->tx_new) {
 981                mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
 982                netif_wake_queue(dev);
 983                return;
 984        }
 985
 986        netif_stop_queue(dev);
 987
 988        writereg(&ll->rap, LE_CSR0);
 989        writereg(&ll->rdp, LE_C0_STOP);
 990
 991        lance_init_ring(dev);
 992
 993        if (dev->flags & IFF_PROMISC) {
 994                *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
 995        } else {
 996                *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
 997                lance_load_multicast(dev);
 998        }
 999        load_csrs(lp);
1000        init_restart_lance(lp);
1001        netif_wake_queue(dev);
1002}
1003
1004static void lance_set_multicast_retry(struct timer_list *t)
1005{
1006        struct lance_private *lp = from_timer(lp, t, multicast_timer);
1007        struct net_device *dev = lp->dev;
1008
1009        lance_set_multicast(dev);
1010}
1011
1012static const struct net_device_ops lance_netdev_ops = {
1013        .ndo_open               = lance_open,
1014        .ndo_stop               = lance_close,
1015        .ndo_start_xmit         = lance_start_xmit,
1016        .ndo_tx_timeout         = lance_tx_timeout,
1017        .ndo_set_rx_mode        = lance_set_multicast,
1018        .ndo_validate_addr      = eth_validate_addr,
1019        .ndo_set_mac_address    = eth_mac_addr,
1020};
1021
1022static int dec_lance_probe(struct device *bdev, const int type)
1023{
1024        static unsigned version_printed;
1025        static const char fmt[] = "declance%d";
1026        char name[10];
1027        struct net_device *dev;
1028        struct lance_private *lp;
1029        volatile struct lance_regs *ll;
1030        resource_size_t start = 0, len = 0;
1031        int i, ret;
1032        unsigned long esar_base;
1033        unsigned char *esar;
1034
1035        if (dec_lance_debug && version_printed++ == 0)
1036                printk(version);
1037
1038        if (bdev)
1039                snprintf(name, sizeof(name), "%s", dev_name(bdev));
1040        else {
1041                i = 0;
1042                dev = root_lance_dev;
1043                while (dev) {
1044                        i++;
1045                        lp = netdev_priv(dev);
1046                        dev = lp->next;
1047                }
1048                snprintf(name, sizeof(name), fmt, i);
1049        }
1050
1051        dev = alloc_etherdev(sizeof(struct lance_private));
1052        if (!dev) {
1053                ret = -ENOMEM;
1054                goto err_out;
1055        }
1056
1057        /*
1058         * alloc_etherdev ensures the data structures used by the LANCE
1059         * are aligned.
1060         */
1061        lp = netdev_priv(dev);
1062        spin_lock_init(&lp->lock);
1063
1064        lp->type = type;
1065        switch (type) {
1066        case ASIC_LANCE:
1067                dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
1068
1069                /* buffer space for the on-board LANCE shared memory */
1070                /*
1071                 * FIXME: ugly hack!
1072                 */
1073                dev->mem_start = CKSEG1ADDR(0x00020000);
1074                dev->mem_end = dev->mem_start + 0x00020000;
1075                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1076                esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
1077
 1078                /* Work around a crash when booting KN04 2.1k from disk. */
1079                memset((void *)dev->mem_start, 0,
1080                       dev->mem_end - dev->mem_start);
1081
1082                /*
1083                 * setup the pointer arrays, this sucks [tm] :-(
1084                 */
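                     /* CPU offsets are doubled: only every other 16-byte
                      * chunk of the CPU-visible region holds LANCE data.
                      */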
1085                for (i = 0; i < RX_RING_SIZE; i++) {
1086                        lp->rx_buf_ptr_cpu[i] =
1087                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1088                                         2 * i * RX_BUFF_SIZE);
1089                        lp->rx_buf_ptr_lnc[i] =
1090                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1091                }
1092                for (i = 0; i < TX_RING_SIZE; i++) {
1093                        lp->tx_buf_ptr_cpu[i] =
1094                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1095                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
1096                                         2 * i * TX_BUFF_SIZE);
1097                        lp->tx_buf_ptr_lnc[i] =
1098                                (BUF_OFFSET_LNC +
1099                                 RX_RING_SIZE * RX_BUFF_SIZE +
1100                                 i * TX_BUFF_SIZE);
1101                }
1102
1103                /* Setup I/O ASIC LANCE DMA.  */
1104                lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
1105                ioasic_write(IO_REG_LANCE_DMA_P,
1106                             CPHYSADDR(dev->mem_start) << 3);
1107
1108                break;
1109#ifdef CONFIG_TC
1110        case PMAD_LANCE:
1111                dev_set_drvdata(bdev, dev);
1112
1113                start = to_tc_dev(bdev)->resource.start;
1114                len = to_tc_dev(bdev)->resource.end - start + 1;
1115                if (!request_mem_region(start, len, dev_name(bdev))) {
1116                        printk(KERN_ERR
1117                               "%s: Unable to reserve MMIO resource\n",
1118                               dev_name(bdev));
1119                        ret = -EBUSY;
1120                        goto err_out_dev;
1121                }
1122
1123                dev->mem_start = CKSEG1ADDR(start);
1124                dev->mem_end = dev->mem_start + 0x100000;
1125                dev->base_addr = dev->mem_start + 0x100000;
1126                dev->irq = to_tc_dev(bdev)->interrupt;
1127                esar_base = dev->mem_start + 0x1c0002;
1128                lp->dma_irq = -1;
1129
1130                for (i = 0; i < RX_RING_SIZE; i++) {
1131                        lp->rx_buf_ptr_cpu[i] =
1132                                (char *)(dev->mem_start + BUF_OFFSET_CPU +
1133                                         i * RX_BUFF_SIZE);
1134                        lp->rx_buf_ptr_lnc[i] =
1135                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1136                }
1137                for (i = 0; i < TX_RING_SIZE; i++) {
1138                        lp->tx_buf_ptr_cpu[i] =
1139                                (char *)(dev->mem_start + BUF_OFFSET_CPU +
1140                                         RX_RING_SIZE * RX_BUFF_SIZE +
1141                                         i * TX_BUFF_SIZE);
1142                        lp->tx_buf_ptr_lnc[i] =
1143                                (BUF_OFFSET_LNC +
1144                                 RX_RING_SIZE * RX_BUFF_SIZE +
1145                                 i * TX_BUFF_SIZE);
1146                }
1147
1148                break;
1149#endif
1150        case PMAX_LANCE:
1151                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1152                dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
1153                dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
1154                dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
1155                esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
1156                lp->dma_irq = -1;
1157
1158                /*
1159                 * setup the pointer arrays, this sucks [tm] :-(
1160                 */
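                     /* Same doubling as in the ASIC_LANCE case: each
                      * buffer halfword is word-aligned in CPU space.
                      */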
1161                for (i = 0; i < RX_RING_SIZE; i++) {
1162                        lp->rx_buf_ptr_cpu[i] =
1163                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1164                                         2 * i * RX_BUFF_SIZE);
1165                        lp->rx_buf_ptr_lnc[i] =
1166                                (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1167                }
1168                for (i = 0; i < TX_RING_SIZE; i++) {
1169                        lp->tx_buf_ptr_cpu[i] =
1170                                (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1171                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
1172                                         2 * i * TX_BUFF_SIZE);
1173                        lp->tx_buf_ptr_lnc[i] =
1174                                (BUF_OFFSET_LNC +
1175                                 RX_RING_SIZE * RX_BUFF_SIZE +
1176                                 i * TX_BUFF_SIZE);
1177                }
1178
1179                break;
1180
1181        default:
1182                printk(KERN_ERR "%s: declance_init called with unknown type\n",
1183                        name);
1184                ret = -ENODEV;
1185                goto err_out_dev;
1186        }
1187
1188        ll = (struct lance_regs *) dev->base_addr;
1189        esar = (unsigned char *) esar_base;
1190
1191        /* prom checks */
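             /* The ESAR PROM holds one byte per 32-bit word: the station
              * address stored forwards, backwards and forwards again,
              * followed by an 0xff/0x00/0x55/0xaa test pattern.
              */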
1192        /* First, check for test pattern */
1193        if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
1194            esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
1195                printk(KERN_ERR
1196                        "%s: Ethernet station address prom not found!\n",
1197                        name);
1198                ret = -ENODEV;
1199                goto err_out_resource;
1200        }
1201        /* Check the prom contents */
1202        for (i = 0; i < 8; i++) {
1203                if (esar[i * 4] != esar[0x3c - i * 4] &&
1204                    esar[i * 4] != esar[0x40 + i * 4] &&
1205                    esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
1206                        printk(KERN_ERR "%s: Something is wrong with the "
1207                                "ethernet station address prom!\n", name);
1208                        ret = -ENODEV;
1209                        goto err_out_resource;
1210                }
1211        }
1212
1213        /* Copy the ethernet address to the device structure, later to the
1214         * lance initialization block so the lance gets it every time it's
1215         * (re)initialized.
1216         */
1217        switch (type) {
1218        case ASIC_LANCE:
1219                printk("%s: IOASIC onboard LANCE", name);
1220                break;
1221        case PMAD_LANCE:
1222                printk("%s: PMAD-AA", name);
1223                break;
1224        case PMAX_LANCE:
1225                printk("%s: PMAX onboard LANCE", name);
1226                break;
1227        }
1228        for (i = 0; i < 6; i++)
1229                dev->dev_addr[i] = esar[i * 4];
1230
1231        printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1232
1233        dev->netdev_ops = &lance_netdev_ops;
1234        dev->watchdog_timeo = 5*HZ;
1235
1236        /* lp->ll is the location of the registers for lance card */
1237        lp->ll = ll;
1238
1239        /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
1240         * specification.
1241         */
1242        lp->busmaster_regval = 0;
1243
1244        dev->dma = 0;
1245
1246        /* We cannot sleep if the chip is busy during a
1247         * multicast list update event, because such events
1248         * can occur from interrupts (ex. IPv6).  So we
1249         * use a timer to try again later when necessary. -DaveM
1250         */
1251        lp->dev = dev;
1252        timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);
1253
1254
1255        ret = register_netdev(dev);
1256        if (ret) {
1257                printk(KERN_ERR
1258                        "%s: Unable to register netdev, aborting.\n", name);
1259                goto err_out_resource;
1260        }
1261
1262        if (!bdev) {
1263                lp->next = root_lance_dev;
1264                root_lance_dev = dev;
1265        }
1266
1267        printk("%s: registered as %s.\n", name, dev->name);
1268        return 0;
1269
1270err_out_resource:
1271        if (bdev)
1272                release_mem_region(start, len);
1273
1274err_out_dev:
1275        free_netdev(dev);
1276
1277err_out:
1278        return ret;
1279}
1280
1281/* Find all the lance cards on the system and initialize them */
1282static int __init dec_lance_platform_probe(void)
1283{
1284        int count = 0;
1285
1286        if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1287                if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1288                        if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1289                                count++;
1290                } else if (!TURBOCHANNEL) {
1291                        if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1292                                count++;
1293                }
1294        }
1295
1296        return (count > 0) ? 0 : -ENODEV;
1297}
1298
1299static void __exit dec_lance_platform_remove(void)
1300{
1301        while (root_lance_dev) {
1302                struct net_device *dev = root_lance_dev;
1303                struct lance_private *lp = netdev_priv(dev);
1304
1305                unregister_netdev(dev);
1306                root_lance_dev = lp->next;
1307                free_netdev(dev);
1308        }
1309}
1310
1311#ifdef CONFIG_TC
1312static int dec_lance_tc_probe(struct device *dev);
1313static int dec_lance_tc_remove(struct device *dev);
1314
1315static const struct tc_device_id dec_lance_tc_table[] = {
1316        { "DEC     ", "PMAD-AA " },
1317        { }
1318};
1319MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1320
1321static struct tc_driver dec_lance_tc_driver = {
1322        .id_table       = dec_lance_tc_table,
1323        .driver         = {
1324                .name   = "declance",
1325                .bus    = &tc_bus_type,
1326                .probe  = dec_lance_tc_probe,
1327                .remove = dec_lance_tc_remove,
1328        },
1329};
1330
1331static int dec_lance_tc_probe(struct device *dev)
1332{
1333        int status = dec_lance_probe(dev, PMAD_LANCE);
1334        if (!status)
1335                get_device(dev);
1336        return status;
1337}
1338
1339static void dec_lance_remove(struct device *bdev)
1340{
1341        struct net_device *dev = dev_get_drvdata(bdev);
1342        resource_size_t start, len;
1343
1344        unregister_netdev(dev);
1345        start = to_tc_dev(bdev)->resource.start;
1346        len = to_tc_dev(bdev)->resource.end - start + 1;
1347        release_mem_region(start, len);
1348        free_netdev(dev);
1349}
1350
1351static int dec_lance_tc_remove(struct device *dev)
1352{
1353        put_device(dev);
1354        dec_lance_remove(dev);
1355        return 0;
1356}
1357#endif
1358
1359static int __init dec_lance_init(void)
1360{
1361        int status;
1362
1363        status = tc_register_driver(&dec_lance_tc_driver);
1364        if (!status)
1365                dec_lance_platform_probe();
1366        return status;
1367}
1368
1369static void __exit dec_lance_exit(void)
1370{
1371        dec_lance_platform_remove();
1372        tc_unregister_driver(&dec_lance_tc_driver);
1373}
1374
1375
1376module_init(dec_lance_init);
1377module_exit(dec_lance_exit);
1378