linux/drivers/net/tulip/de2104x.c
   1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
   2/*
   3        Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
   4
   5        Copyright 1994, 1995 Digital Equipment Corporation.         [de4x5.c]
   6        Written/copyright 1994-2001 by Donald Becker.               [tulip.c]
   7
   8        This software may be used and distributed according to the terms of
   9        the GNU General Public License (GPL), incorporated herein by reference.
  10        Drivers based on or derived from this code fall under the GPL and must
  11        retain the authorship, copyright and license notice.  This file is not
  12        a complete program and may only be used when the entire operating
  13        system is licensed under the GPL.
  14
  15        See the file COPYING in this distribution for more information.
  16
  17        TODO, in rough priority order:
  18        * Support forcing media type with a module parameter,
  19          like dl2k.c/sundance.c
  20        * Constants (module parms?) for Rx work limit
  21        * Complete reset on PciErr
  22        * Jumbo frames / dev->change_mtu
  23        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
  24        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
  25        * Implement Tx software interrupt mitigation via
  26          Tx descriptor bit
  27
  28 */
  29
  30#define DRV_NAME                "de2104x"
  31#define DRV_VERSION             "0.7"
  32#define DRV_RELDATE             "Mar 17, 2004"
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/init.h>
  39#include <linux/pci.h>
  40#include <linux/delay.h>
  41#include <linux/ethtool.h>
  42#include <linux/compiler.h>
  43#include <linux/rtnetlink.h>
  44#include <linux/crc32.h>
  45
  46#include <asm/io.h>
  47#include <asm/irq.h>
  48#include <asm/uaccess.h>
  49#include <asm/unaligned.h>
  50
  51/* These identify the driver base version and may not be removed. */
  52static char version[] =
  53KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
  54
  55MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
  56MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
  57MODULE_LICENSE("GPL");
  58MODULE_VERSION(DRV_VERSION);
  59
  60static int debug = -1;
  61module_param (debug, int, 0);
  62MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
  63
  64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
  65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
  66        || defined(CONFIG_SPARC) || defined(__ia64__) \
  67        || defined(__sh__) || defined(__mips__)
  68static int rx_copybreak = 1518;
  69#else
  70static int rx_copybreak = 100;
  71#endif
  72module_param (rx_copybreak, int, 0);
  73MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
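
/*
 * An illustrative sketch (not part of the driver) of what the breakpoint
 * controls: frames no longer than rx_copybreak are copied into a freshly
 * allocated small skb so the original DMA buffer can stay in the ring,
 * while longer frames hand the DMA buffer up the stack and a full-size
 * replacement is mapped in its place (see de_rx() below).
 */
#if 0	/* example only, not compiled */
static int de_should_copy_example(unsigned int len)
{
	return len <= rx_copybreak;	/* non-zero: copy and recycle buffer */
}
#endif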
  74
  75#define PFX                     DRV_NAME ": "
  76
  77#define DE_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
  78                                 NETIF_MSG_PROBE        | \
  79                                 NETIF_MSG_LINK         | \
  80                                 NETIF_MSG_IFDOWN       | \
  81                                 NETIF_MSG_IFUP         | \
  82                                 NETIF_MSG_RX_ERR       | \
  83                                 NETIF_MSG_TX_ERR)
  84
  85/* Descriptor skip length in 32 bit longwords. */
  86#ifndef CONFIG_DE2104X_DSL
  87#define DSL                     0
  88#else
  89#define DSL                     CONFIG_DE2104X_DSL
  90#endif
  91
  92#define DE_RX_RING_SIZE         64
  93#define DE_TX_RING_SIZE         64
  94#define DE_RING_BYTES           \
  95                ((sizeof(struct de_desc) * DE_RX_RING_SIZE) +   \
  96                (sizeof(struct de_desc) * DE_TX_RING_SIZE))
  97#define NEXT_TX(N)              (((N) + 1) & (DE_TX_RING_SIZE - 1))
  98#define NEXT_RX(N)              (((N) + 1) & (DE_RX_RING_SIZE - 1))
  99#define TX_BUFFS_AVAIL(CP)                                      \
 100        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
 101          (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :       \
 102          (CP)->tx_tail - (CP)->tx_head - 1)
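
/*
 * An illustrative sketch (not part of the driver): because the ring sizes
 * are powers of two, NEXT_TX()/NEXT_RX() wrap an index with a simple mask,
 * and TX_BUFFS_AVAIL() computes the free Tx slots from the head/tail
 * indices, always keeping one slot unused so that head == tail means
 * "ring empty" rather than "ring full".
 */
#if 0	/* example only, not compiled */
static void de_ring_wrap_example(void)
{
	unsigned idx = DE_TX_RING_SIZE - 1;

	idx = NEXT_TX(idx);		/* 63 wraps back to 0 */
	BUG_ON(idx != 0);
}
#endif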
 103
 104#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
 105#define RX_OFFSET               2
 106
 107#define DE_SETUP_SKB            ((struct sk_buff *) 1)
 108#define DE_DUMMY_SKB            ((struct sk_buff *) 2)
 109#define DE_SETUP_FRAME_WORDS    96
 110#define DE_EEPROM_WORDS         256
 111#define DE_EEPROM_SIZE          (DE_EEPROM_WORDS * sizeof(u16))
 112#define DE_MAX_MEDIA            5
 113
 114#define DE_MEDIA_TP_AUTO        0
 115#define DE_MEDIA_BNC            1
 116#define DE_MEDIA_AUI            2
 117#define DE_MEDIA_TP             3
 118#define DE_MEDIA_TP_FD          4
 119#define DE_MEDIA_INVALID        DE_MAX_MEDIA
 120#define DE_MEDIA_FIRST          0
 121#define DE_MEDIA_LAST           (DE_MAX_MEDIA - 1)
 122#define DE_AUI_BNC              (SUPPORTED_AUI | SUPPORTED_BNC)
 123
 124#define DE_TIMER_LINK           (60 * HZ)
 125#define DE_TIMER_NO_LINK        (5 * HZ)
 126
 127#define DE_NUM_REGS             16
 128#define DE_REGS_SIZE            (DE_NUM_REGS * sizeof(u32))
 129#define DE_REGS_VER             1
 130
 131/* Time in jiffies before concluding the transmitter is hung. */
 132#define TX_TIMEOUT              (6*HZ)
 133
 134/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
 135   to support a pre-NWay full-duplex signaling mechanism using short frames.
 136   No one knows what it should be, but if left at its default value some
 137   10base2(!) packets trigger a full-duplex-request interrupt. */
 138#define FULL_DUPLEX_MAGIC       0x6969
 139
 140enum {
 141        /* NIC registers */
 142        BusMode                 = 0x00,
 143        TxPoll                  = 0x08,
 144        RxPoll                  = 0x10,
 145        RxRingAddr              = 0x18,
 146        TxRingAddr              = 0x20,
 147        MacStatus               = 0x28,
 148        MacMode                 = 0x30,
 149        IntrMask                = 0x38,
 150        RxMissed                = 0x40,
 151        ROMCmd                  = 0x48,
 152        CSR11                   = 0x58,
 153        SIAStatus               = 0x60,
 154        CSR13                   = 0x68,
 155        CSR14                   = 0x70,
 156        CSR15                   = 0x78,
 157        PCIPM                   = 0x40,
 158
 159        /* BusMode bits */
 160        CmdReset                = (1 << 0),
 161        CacheAlign16            = 0x00008000,
 162        BurstLen4               = 0x00000400,
 163        DescSkipLen             = (DSL << 2),
 164
 165        /* Rx/TxPoll bits */
 166        NormalTxPoll            = (1 << 0),
 167        NormalRxPoll            = (1 << 0),
 168
 169        /* Tx/Rx descriptor status bits */
 170        DescOwn                 = (1 << 31),
 171        RxError                 = (1 << 15),
 172        RxErrLong               = (1 << 7),
 173        RxErrCRC                = (1 << 1),
 174        RxErrFIFO               = (1 << 0),
 175        RxErrRunt               = (1 << 11),
 176        RxErrFrame              = (1 << 14),
 177        RingEnd                 = (1 << 25),
 178        FirstFrag               = (1 << 29),
 179        LastFrag                = (1 << 30),
 180        TxError                 = (1 << 15),
 181        TxFIFOUnder             = (1 << 1),
 182        TxLinkFail              = (1 << 2) | (1 << 10) | (1 << 11),
 183        TxMaxCol                = (1 << 8),
 184        TxOWC                   = (1 << 9),
 185        TxJabber                = (1 << 14),
 186        SetupFrame              = (1 << 27),
 187        TxSwInt                 = (1 << 31),
 188
 189        /* MacStatus bits */
 190        IntrOK                  = (1 << 16),
 191        IntrErr                 = (1 << 15),
 192        RxIntr                  = (1 << 6),
 193        RxEmpty                 = (1 << 7),
 194        TxIntr                  = (1 << 0),
 195        TxEmpty                 = (1 << 2),
 196        PciErr                  = (1 << 13),
 197        TxState                 = (1 << 22) | (1 << 21) | (1 << 20),
 198        RxState                 = (1 << 19) | (1 << 18) | (1 << 17),
 199        LinkFail                = (1 << 12),
 200        LinkPass                = (1 << 4),
 201        RxStopped               = (1 << 8),
 202        TxStopped               = (1 << 1),
 203
 204        /* MacMode bits */
 205        TxEnable                = (1 << 13),
 206        RxEnable                = (1 << 1),
 207        RxTx                    = TxEnable | RxEnable,
 208        FullDuplex              = (1 << 9),
 209        AcceptAllMulticast      = (1 << 7),
 210        AcceptAllPhys           = (1 << 6),
 211        BOCnt                   = (1 << 5),
 212        MacModeClear            = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
 213                                  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
 214
 215        /* ROMCmd bits */
 216        EE_SHIFT_CLK            = 0x02, /* EEPROM shift clock. */
 217        EE_CS                   = 0x01, /* EEPROM chip select. */
 218        EE_DATA_WRITE           = 0x04, /* Data from the Tulip to EEPROM. */
 219        EE_WRITE_0              = 0x01,
 220        EE_WRITE_1              = 0x05,
 221        EE_DATA_READ            = 0x08, /* Data from the EEPROM chip. */
 222        EE_ENB                  = (0x4800 | EE_CS),
 223
  224        /* The EEPROM commands include the always-set leading bit. */
 225        EE_READ_CMD             = 6,
 226
 227        /* RxMissed bits */
 228        RxMissedOver            = (1 << 16),
 229        RxMissedMask            = 0xffff,
 230
 231        /* SROM-related bits */
 232        SROMC0InfoLeaf          = 27,
 233        MediaBlockMask          = 0x3f,
 234        MediaCustomCSRs         = (1 << 6),
 235
 236        /* PCIPM bits */
 237        PM_Sleep                = (1 << 31),
 238        PM_Snooze               = (1 << 30),
 239        PM_Mask                 = PM_Sleep | PM_Snooze,
 240
 241        /* SIAStatus bits */
 242        NWayState               = (1 << 14) | (1 << 13) | (1 << 12),
 243        NWayRestart             = (1 << 12),
 244        NonselPortActive        = (1 << 9),
 245        LinkFailStatus          = (1 << 2),
 246        NetCxnErr               = (1 << 1),
 247};
 248
 249static const u32 de_intr_mask =
 250        IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
 251        LinkPass | LinkFail | PciErr;
 252
 253/*
  254 * Set the programmable burst length to 4 longwords for all chips:
  255 * DMA errors result without these values.  Cache-align to 16 longwords.
 256 */
 257static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
 258
 259struct de_srom_media_block {
 260        u8                      opts;
 261        u16                     csr13;
 262        u16                     csr14;
 263        u16                     csr15;
 264} __attribute__((packed));
 265
 266struct de_srom_info_leaf {
 267        u16                     default_media;
 268        u8                      n_blocks;
 269        u8                      unused;
 270} __attribute__((packed));
 271
 272struct de_desc {
 273        __le32                  opts1;
 274        __le32                  opts2;
 275        __le32                  addr1;
 276        __le32                  addr2;
 277#if DSL
 278        __le32                  skip[DSL];
 279#endif
 280};
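
/*
 * An illustrative sketch (not part of the driver): a non-zero descriptor
 * skip length (DSL) pads each in-memory descriptor by DSL longwords, and
 * the same value is programmed into the bus mode register via DescSkipLen
 * so the chip steps over the padding when walking the ring.
 */
#if 0	/* example only, not compiled */
static void de_desc_layout_example(void)
{
	BUILD_BUG_ON(sizeof(struct de_desc) != (4 + DSL) * sizeof(__le32));
}
#endif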
 281
 282struct media_info {
 283        u16                     type;   /* DE_MEDIA_xxx */
 284        u16                     csr13;
 285        u16                     csr14;
 286        u16                     csr15;
 287};
 288
 289struct ring_info {
 290        struct sk_buff          *skb;
 291        dma_addr_t              mapping;
 292};
 293
 294struct de_private {
 295        unsigned                tx_head;
 296        unsigned                tx_tail;
 297        unsigned                rx_tail;
 298
 299        void                    __iomem *regs;
 300        struct net_device       *dev;
 301        spinlock_t              lock;
 302
 303        struct de_desc          *rx_ring;
 304        struct de_desc          *tx_ring;
 305        struct ring_info        tx_skb[DE_TX_RING_SIZE];
 306        struct ring_info        rx_skb[DE_RX_RING_SIZE];
 307        unsigned                rx_buf_sz;
 308        dma_addr_t              ring_dma;
 309
 310        u32                     msg_enable;
 311
 312        struct net_device_stats net_stats;
 313
 314        struct pci_dev          *pdev;
 315
 316        u16                     setup_frame[DE_SETUP_FRAME_WORDS];
 317
 318        u32                     media_type;
 319        u32                     media_supported;
 320        u32                     media_advertise;
 321        struct media_info       media[DE_MAX_MEDIA];
 322        struct timer_list       media_timer;
 323
 324        u8                      *ee_data;
 325        unsigned                board_idx;
 326        unsigned                de21040 : 1;
 327        unsigned                media_lock : 1;
 328};
 329
 330
 331static void de_set_rx_mode (struct net_device *dev);
 332static void de_tx (struct de_private *de);
 333static void de_clean_rings (struct de_private *de);
 334static void de_media_interrupt (struct de_private *de, u32 status);
 335static void de21040_media_timer (unsigned long data);
 336static void de21041_media_timer (unsigned long data);
 337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
 338
 339
 340static struct pci_device_id de_pci_tbl[] = {
 341        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
 342          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 343        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
 344          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 345        { },
 346};
 347MODULE_DEVICE_TABLE(pci, de_pci_tbl);
 348
 349static const char * const media_name[DE_MAX_MEDIA] = {
 350        "10baseT auto",
 351        "BNC",
 352        "AUI",
 353        "10baseT-HD",
 354        "10baseT-FD"
 355};
 356
 357/* 21040 transceiver register settings:
  358 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD */
 359static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
 360static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
 361static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
 362
  363/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
 364static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
 365static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
 366static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
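
/*
 * An illustrative sketch (not part of the driver): the tables above are
 * indexed by the DE_MEDIA_* constants; loading one row into the per-device
 * media_info (assuming a 21041) would look like this.
 */
#if 0	/* example only, not compiled */
static void de_media_table_example(struct de_private *de)
{
	de->media[DE_MEDIA_AUI].csr13 = t21041_csr13[DE_MEDIA_AUI];
	de->media[DE_MEDIA_AUI].csr14 = t21041_csr14[DE_MEDIA_AUI];
	de->media[DE_MEDIA_AUI].csr15 = t21041_csr15[DE_MEDIA_AUI];
}
#endif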
 367
 368
 369#define dr32(reg)               readl(de->regs + (reg))
 370#define dw32(reg,val)           writel((val), de->regs + (reg))
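
/*
 * Both accessors implicitly use a local variable named "de"; an
 * illustrative sketch (not part of the driver) of the usual
 * read-modify-write pattern, assuming such a variable is in scope:
 */
#if 0	/* example only, not compiled */
static void de_csr_access_example(struct de_private *de)
{
	u32 macmode = dr32(MacMode);	/* read CSR6 */

	dw32(MacMode, macmode & ~RxTx);	/* stop the Rx/Tx DMA engines */
	dr32(MacMode);			/* read back to flush the posted write */
}
#endif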
 371
 372
 373static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
 374                            u32 status, u32 len)
 375{
 376        if (netif_msg_rx_err (de))
 377                printk (KERN_DEBUG
 378                        "%s: rx err, slot %d status 0x%x len %d\n",
 379                        de->dev->name, rx_tail, status, len);
 380
 381        if ((status & 0x38000300) != 0x0300) {
  382                /* Ignore earlier buffers. */
 383                if ((status & 0xffff) != 0x7fff) {
 384                        if (netif_msg_rx_err(de))
 385                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
 386                                           "spanned multiple buffers, status %8.8x!\n",
 387                                           de->dev->name, status);
 388                        de->net_stats.rx_length_errors++;
 389                }
 390        } else if (status & RxError) {
 391                /* There was a fatal error. */
 392                de->net_stats.rx_errors++; /* end of a packet.*/
 393                if (status & 0x0890) de->net_stats.rx_length_errors++;
 394                if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
 395                if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
 396        }
 397}
 398
 399static void de_rx (struct de_private *de)
 400{
 401        unsigned rx_tail = de->rx_tail;
 402        unsigned rx_work = DE_RX_RING_SIZE;
 403        unsigned drop = 0;
 404        int rc;
 405
 406        while (--rx_work) {
 407                u32 status, len;
 408                dma_addr_t mapping;
 409                struct sk_buff *skb, *copy_skb;
 410                unsigned copying_skb, buflen;
 411
 412                skb = de->rx_skb[rx_tail].skb;
 413                BUG_ON(!skb);
 414                rmb();
 415                status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
 416                if (status & DescOwn)
 417                        break;
 418
 419                len = ((status >> 16) & 0x7ff) - 4;
 420                mapping = de->rx_skb[rx_tail].mapping;
 421
 422                if (unlikely(drop)) {
 423                        de->net_stats.rx_dropped++;
 424                        goto rx_next;
 425                }
 426
 427                if (unlikely((status & 0x38008300) != 0x0300)) {
 428                        de_rx_err_acct(de, rx_tail, status, len);
 429                        goto rx_next;
 430                }
 431
 432                copying_skb = (len <= rx_copybreak);
 433
 434                if (unlikely(netif_msg_rx_status(de)))
 435                        printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
 436                               de->dev->name, rx_tail, status, len,
 437                               copying_skb);
 438
 439                buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
 440                copy_skb = dev_alloc_skb (buflen);
 441                if (unlikely(!copy_skb)) {
 442                        de->net_stats.rx_dropped++;
 443                        drop = 1;
 444                        rx_work = 100;
 445                        goto rx_next;
 446                }
 447
 448                if (!copying_skb) {
 449                        pci_unmap_single(de->pdev, mapping,
 450                                         buflen, PCI_DMA_FROMDEVICE);
 451                        skb_put(skb, len);
 452
 453                        mapping =
 454                        de->rx_skb[rx_tail].mapping =
 455                                pci_map_single(de->pdev, copy_skb->data,
 456                                               buflen, PCI_DMA_FROMDEVICE);
 457                        de->rx_skb[rx_tail].skb = copy_skb;
 458                } else {
 459                        pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 460                        skb_reserve(copy_skb, RX_OFFSET);
 461                        skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
 462                                                  len);
 463                        pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 464
 465                        /* We'll reuse the original ring buffer. */
 466                        skb = copy_skb;
 467                }
 468
 469                skb->protocol = eth_type_trans (skb, de->dev);
 470
 471                de->net_stats.rx_packets++;
 472                de->net_stats.rx_bytes += skb->len;
 473                rc = netif_rx (skb);
 474                if (rc == NET_RX_DROP)
 475                        drop = 1;
 476
 477rx_next:
 478                if (rx_tail == (DE_RX_RING_SIZE - 1))
 479                        de->rx_ring[rx_tail].opts2 =
 480                                cpu_to_le32(RingEnd | de->rx_buf_sz);
 481                else
 482                        de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
 483                de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
 484                wmb();
 485                de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
 486                rx_tail = NEXT_RX(rx_tail);
 487        }
 488
 489        if (!rx_work)
 490                printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
 491
 492        de->rx_tail = rx_tail;
 493}
 494
 495static irqreturn_t de_interrupt (int irq, void *dev_instance)
 496{
 497        struct net_device *dev = dev_instance;
 498        struct de_private *de = netdev_priv(dev);
 499        u32 status;
 500
 501        status = dr32(MacStatus);
 502        if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
 503                return IRQ_NONE;
 504
 505        if (netif_msg_intr(de))
 506                printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
 507                        dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
 508
 509        dw32(MacStatus, status);
 510
 511        if (status & (RxIntr | RxEmpty)) {
 512                de_rx(de);
 513                if (status & RxEmpty)
 514                        dw32(RxPoll, NormalRxPoll);
 515        }
 516
 517        spin_lock(&de->lock);
 518
 519        if (status & (TxIntr | TxEmpty))
 520                de_tx(de);
 521
 522        if (status & (LinkPass | LinkFail))
 523                de_media_interrupt(de, status);
 524
 525        spin_unlock(&de->lock);
 526
 527        if (status & PciErr) {
 528                u16 pci_status;
 529
 530                pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
 531                pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
 532                printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
 533                       dev->name, status, pci_status);
 534        }
 535
 536        return IRQ_HANDLED;
 537}
 538
 539static void de_tx (struct de_private *de)
 540{
 541        unsigned tx_head = de->tx_head;
 542        unsigned tx_tail = de->tx_tail;
 543
 544        while (tx_tail != tx_head) {
 545                struct sk_buff *skb;
 546                u32 status;
 547
 548                rmb();
 549                status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
 550                if (status & DescOwn)
 551                        break;
 552
 553                skb = de->tx_skb[tx_tail].skb;
 554                BUG_ON(!skb);
 555                if (unlikely(skb == DE_DUMMY_SKB))
 556                        goto next;
 557
 558                if (unlikely(skb == DE_SETUP_SKB)) {
 559                        pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
 560                                         sizeof(de->setup_frame), PCI_DMA_TODEVICE);
 561                        goto next;
 562                }
 563
 564                pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
 565                                 skb->len, PCI_DMA_TODEVICE);
 566
 567                if (status & LastFrag) {
 568                        if (status & TxError) {
 569                                if (netif_msg_tx_err(de))
 570                                        printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
 571                                               de->dev->name, status);
 572                                de->net_stats.tx_errors++;
 573                                if (status & TxOWC)
 574                                        de->net_stats.tx_window_errors++;
 575                                if (status & TxMaxCol)
 576                                        de->net_stats.tx_aborted_errors++;
 577                                if (status & TxLinkFail)
 578                                        de->net_stats.tx_carrier_errors++;
 579                                if (status & TxFIFOUnder)
 580                                        de->net_stats.tx_fifo_errors++;
 581                        } else {
 582                                de->net_stats.tx_packets++;
 583                                de->net_stats.tx_bytes += skb->len;
 584                                if (netif_msg_tx_done(de))
 585                                        printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
 586                        }
 587                        dev_kfree_skb_irq(skb);
 588                }
 589
 590next:
 591                de->tx_skb[tx_tail].skb = NULL;
 592
 593                tx_tail = NEXT_TX(tx_tail);
 594        }
 595
 596        de->tx_tail = tx_tail;
 597
 598        if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
 599                netif_wake_queue(de->dev);
 600}
 601
 602static netdev_tx_t de_start_xmit (struct sk_buff *skb,
 603                                        struct net_device *dev)
 604{
 605        struct de_private *de = netdev_priv(dev);
 606        unsigned int entry, tx_free;
 607        u32 mapping, len, flags = FirstFrag | LastFrag;
 608        struct de_desc *txd;
 609
 610        spin_lock_irq(&de->lock);
 611
 612        tx_free = TX_BUFFS_AVAIL(de);
 613        if (tx_free == 0) {
 614                netif_stop_queue(dev);
 615                spin_unlock_irq(&de->lock);
 616                return NETDEV_TX_BUSY;
 617        }
 618        tx_free--;
 619
 620        entry = de->tx_head;
 621
 622        txd = &de->tx_ring[entry];
 623
 624        len = skb->len;
 625        mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
 626        if (entry == (DE_TX_RING_SIZE - 1))
 627                flags |= RingEnd;
 628        if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
 629                flags |= TxSwInt;
 630        flags |= len;
 631        txd->opts2 = cpu_to_le32(flags);
 632        txd->addr1 = cpu_to_le32(mapping);
 633
 634        de->tx_skb[entry].skb = skb;
 635        de->tx_skb[entry].mapping = mapping;
 636        wmb();
 637
 638        txd->opts1 = cpu_to_le32(DescOwn);
 639        wmb();
 640
 641        de->tx_head = NEXT_TX(entry);
 642        if (netif_msg_tx_queued(de))
 643                printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
 644                       dev->name, entry, skb->len);
 645
 646        if (tx_free == 0)
 647                netif_stop_queue(dev);
 648
 649        spin_unlock_irq(&de->lock);
 650
 651        /* Trigger an immediate transmit demand. */
 652        dw32(TxPoll, NormalTxPoll);
 653        dev->trans_start = jiffies;
 654
 655        return NETDEV_TX_OK;
 656}
 657
 658/* Set or clear the multicast filter for this adaptor.
 659   Note that we only use exclusion around actually queueing the
 660   new frame, not around filling de->setup_frame.  This is non-deterministic
 661   when re-entered but still correct. */
 662
 663#undef set_bit_le
 664#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
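
/*
 * An illustrative sketch (not part of the driver): set_bit_le() sets bit
 * (i % 8) of byte (i / 8), i.e. the bit numbering is little-endian
 * regardless of host byte order.  Setting the broadcast entry, bit 255,
 * therefore sets the top bit of byte 31 of the 512-bit hash table.
 */
#if 0	/* example only, not compiled */
static void de_set_bit_le_example(void)
{
	u16 hash_table[32] = { 0 };

	set_bit_le(255, hash_table);
	BUG_ON(((u8 *)hash_table)[31] != 0x80);
}
#endif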
 665
 666static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
 667{
 668        struct de_private *de = netdev_priv(dev);
 669        u16 hash_table[32];
 670        struct dev_mc_list *mclist;
 671        int i;
 672        u16 *eaddrs;
 673
 674        memset(hash_table, 0, sizeof(hash_table));
 675        set_bit_le(255, hash_table);                    /* Broadcast entry */
 676        /* This should work on big-endian machines as well. */
 677        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
 678             i++, mclist = mclist->next) {
 679                int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
 680
  681                set_bit_le(index, hash_table);
  682        }
  683
  684        for (i = 0; i < 32; i++) {
  685                *setup_frm++ = hash_table[i];
  686                *setup_frm++ = hash_table[i];
  687        }
  688        setup_frm = &de->setup_frame[13*6];
 689
 690        /* Fill the final entry with our physical address. */
 691        eaddrs = (u16 *)dev->dev_addr;
 692        *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
 693        *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
 694        *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
 695}
 696
 697static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
 698{
 699        struct de_private *de = netdev_priv(dev);
 700        struct dev_mc_list *mclist;
 701        int i;
 702        u16 *eaddrs;
 703
 704        /* We have <= 14 addresses so we can use the wonderful
 705           16 address perfect filtering of the Tulip. */
 706        for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
 707             i++, mclist = mclist->next) {
 708                eaddrs = (u16 *)mclist->dmi_addr;
 709                *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
 710                *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
 711                *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
 712        }
 713        /* Fill the unused entries with the broadcast address. */
 714        memset(setup_frm, 0xff, (15-i)*12);
 715        setup_frm = &de->setup_frame[15*6];
 716
 717        /* Fill the final entry with our physical address. */
 718        eaddrs = (u16 *)dev->dev_addr;
 719        *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
 720        *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
 721        *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
 722}
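
/*
 * An illustrative sketch (not part of the driver): in perfect-filtering
 * mode the 192-byte setup frame holds 16 entries of 12 bytes, one per
 * address.  Each 16-bit word of the MAC address is stored twice so that
 * the low-address shortword of every 32-bit longword is valid on either
 * endianness, which is exactly what the loops above emit.
 */
#if 0	/* example only, not compiled */
static void de_setup_entry_example(u16 *entry, const u16 *addr)
{
	entry[0] = addr[0]; entry[1] = addr[0];
	entry[2] = addr[1]; entry[3] = addr[1];
	entry[4] = addr[2]; entry[5] = addr[2];
}
#endif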
 723
 724
 725static void __de_set_rx_mode (struct net_device *dev)
 726{
 727        struct de_private *de = netdev_priv(dev);
 728        u32 macmode;
 729        unsigned int entry;
 730        u32 mapping;
 731        struct de_desc *txd;
 732        struct de_desc *dummy_txd = NULL;
 733
 734        macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
 735
 736        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
 737                macmode |= AcceptAllMulticast | AcceptAllPhys;
 738                goto out;
 739        }
 740
 741        if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
 742                /* Too many to filter well -- accept all multicasts. */
 743                macmode |= AcceptAllMulticast;
 744                goto out;
 745        }
 746
 747        /* Note that only the low-address shortword of setup_frame is valid!
 748           The values are doubled for big-endian architectures. */
 749        if (dev->mc_count > 14) /* Must use a multicast hash table. */
 750                build_setup_frame_hash (de->setup_frame, dev);
 751        else
 752                build_setup_frame_perfect (de->setup_frame, dev);
 753
 754        /*
 755         * Now add this frame to the Tx list.
 756         */
 757
 758        entry = de->tx_head;
 759
 760        /* Avoid a chip errata by prefixing a dummy entry. */
 761        if (entry != 0) {
 762                de->tx_skb[entry].skb = DE_DUMMY_SKB;
 763
 764                dummy_txd = &de->tx_ring[entry];
 765                dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
 766                                   cpu_to_le32(RingEnd) : 0;
 767                dummy_txd->addr1 = 0;
 768
 769                /* Must set DescOwned later to avoid race with chip */
 770
 771                entry = NEXT_TX(entry);
 772        }
 773
 774        de->tx_skb[entry].skb = DE_SETUP_SKB;
 775        de->tx_skb[entry].mapping = mapping =
 776            pci_map_single (de->pdev, de->setup_frame,
 777                            sizeof (de->setup_frame), PCI_DMA_TODEVICE);
 778
 779        /* Put the setup frame on the Tx list. */
 780        txd = &de->tx_ring[entry];
 781        if (entry == (DE_TX_RING_SIZE - 1))
 782                txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
 783        else
 784                txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
 785        txd->addr1 = cpu_to_le32(mapping);
 786        wmb();
 787
 788        txd->opts1 = cpu_to_le32(DescOwn);
 789        wmb();
 790
 791        if (dummy_txd) {
 792                dummy_txd->opts1 = cpu_to_le32(DescOwn);
 793                wmb();
 794        }
 795
 796        de->tx_head = NEXT_TX(entry);
 797
 798        if (TX_BUFFS_AVAIL(de) == 0)
 799                netif_stop_queue(dev);
 800
 801        /* Trigger an immediate transmit demand. */
 802        dw32(TxPoll, NormalTxPoll);
 803
 804out:
 805        if (macmode != dr32(MacMode))
 806                dw32(MacMode, macmode);
 807}
 808
 809static void de_set_rx_mode (struct net_device *dev)
 810{
 811        unsigned long flags;
 812        struct de_private *de = netdev_priv(dev);
 813
 814        spin_lock_irqsave (&de->lock, flags);
 815        __de_set_rx_mode(dev);
 816        spin_unlock_irqrestore (&de->lock, flags);
 817}
 818
 819static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
 820{
 821        if (unlikely(rx_missed & RxMissedOver))
 822                de->net_stats.rx_missed_errors += RxMissedMask;
 823        else
 824                de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
 825}
 826
 827static void __de_get_stats(struct de_private *de)
 828{
 829        u32 tmp = dr32(RxMissed); /* self-clearing */
 830
 831        de_rx_missed(de, tmp);
 832}
 833
 834static struct net_device_stats *de_get_stats(struct net_device *dev)
 835{
 836        struct de_private *de = netdev_priv(dev);
 837
  838        /* The chip only needs to report frames it silently dropped. */
 839        spin_lock_irq(&de->lock);
 840        if (netif_running(dev) && netif_device_present(dev))
 841                __de_get_stats(de);
 842        spin_unlock_irq(&de->lock);
 843
 844        return &de->net_stats;
 845}
 846
 847static inline int de_is_running (struct de_private *de)
 848{
 849        return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
 850}
 851
 852static void de_stop_rxtx (struct de_private *de)
 853{
 854        u32 macmode;
 855        unsigned int i = 1300/100;
 856
 857        macmode = dr32(MacMode);
 858        if (macmode & RxTx) {
 859                dw32(MacMode, macmode & ~RxTx);
 860                dr32(MacMode);
 861        }
 862
 863        /* wait until in-flight frame completes.
 864         * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
 865         * Typically expect this loop to end in < 50 us on 100BT.
 866         */
 867        while (--i) {
 868                if (!de_is_running(de))
 869                        return;
 870                udelay(100);
 871        }
 872
 873        printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
 874}
 875
 876static inline void de_start_rxtx (struct de_private *de)
 877{
 878        u32 macmode;
 879
 880        macmode = dr32(MacMode);
 881        if ((macmode & RxTx) != RxTx) {
 882                dw32(MacMode, macmode | RxTx);
 883                dr32(MacMode);
 884        }
 885}
 886
 887static void de_stop_hw (struct de_private *de)
 888{
 889
 890        udelay(5);
 891        dw32(IntrMask, 0);
 892
 893        de_stop_rxtx(de);
 894
 895        dw32(MacStatus, dr32(MacStatus));
 896
 897        udelay(10);
 898
 899        de->rx_tail = 0;
 900        de->tx_head = de->tx_tail = 0;
 901}
 902
 903static void de_link_up(struct de_private *de)
 904{
 905        if (!netif_carrier_ok(de->dev)) {
 906                netif_carrier_on(de->dev);
 907                if (netif_msg_link(de))
 908                        printk(KERN_INFO "%s: link up, media %s\n",
 909                               de->dev->name, media_name[de->media_type]);
 910        }
 911}
 912
 913static void de_link_down(struct de_private *de)
 914{
 915        if (netif_carrier_ok(de->dev)) {
 916                netif_carrier_off(de->dev);
 917                if (netif_msg_link(de))
 918                        printk(KERN_INFO "%s: link down\n", de->dev->name);
 919        }
 920}
 921
 922static void de_set_media (struct de_private *de)
 923{
 924        unsigned media = de->media_type;
 925        u32 macmode = dr32(MacMode);
 926
 927        if (de_is_running(de))
 928                printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);
 929
 930        if (de->de21040)
 931                dw32(CSR11, FULL_DUPLEX_MAGIC);
 932        dw32(CSR13, 0); /* Reset phy */
 933        dw32(CSR14, de->media[media].csr14);
 934        dw32(CSR15, de->media[media].csr15);
 935        dw32(CSR13, de->media[media].csr13);
 936
 937        /* must delay 10ms before writing to other registers,
 938         * especially CSR6
 939         */
 940        mdelay(10);
 941
 942        if (media == DE_MEDIA_TP_FD)
 943                macmode |= FullDuplex;
 944        else
 945                macmode &= ~FullDuplex;
 946
 947        if (netif_msg_link(de)) {
 948                printk(KERN_INFO
 949                       "%s: set link %s\n"
 950                       "%s:    mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
 951                       "%s:    set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
 952                       de->dev->name, media_name[media],
 953                       de->dev->name, dr32(MacMode), dr32(SIAStatus),
 954                       dr32(CSR13), dr32(CSR14), dr32(CSR15),
 955                       de->dev->name, macmode, de->media[media].csr13,
 956                       de->media[media].csr14, de->media[media].csr15);
 957        }
 958        if (macmode != dr32(MacMode))
 959                dw32(MacMode, macmode);
 960}
 961
 962static void de_next_media (struct de_private *de, u32 *media,
 963                           unsigned int n_media)
 964{
 965        unsigned int i;
 966
 967        for (i = 0; i < n_media; i++) {
 968                if (de_ok_to_advertise(de, media[i])) {
 969                        de->media_type = media[i];
 970                        return;
 971                }
 972        }
 973}
 974
 975static void de21040_media_timer (unsigned long data)
 976{
 977        struct de_private *de = (struct de_private *) data;
 978        struct net_device *dev = de->dev;
 979        u32 status = dr32(SIAStatus);
 980        unsigned int carrier;
 981        unsigned long flags;
 982
 983        carrier = (status & NetCxnErr) ? 0 : 1;
 984
 985        if (carrier) {
 986                if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
 987                        goto no_link_yet;
 988
 989                de->media_timer.expires = jiffies + DE_TIMER_LINK;
 990                add_timer(&de->media_timer);
 991                if (!netif_carrier_ok(dev))
 992                        de_link_up(de);
 993                else
 994                        if (netif_msg_timer(de))
 995                                printk(KERN_INFO "%s: %s link ok, status %x\n",
 996                                       dev->name, media_name[de->media_type],
 997                                       status);
 998                return;
 999        }
1000
1001        de_link_down(de);
1002
1003        if (de->media_lock)
1004                return;
1005
1006        if (de->media_type == DE_MEDIA_AUI) {
1007                u32 next_state = DE_MEDIA_TP;
1008                de_next_media(de, &next_state, 1);
1009        } else {
1010                u32 next_state = DE_MEDIA_AUI;
1011                de_next_media(de, &next_state, 1);
1012        }
1013
1014        spin_lock_irqsave(&de->lock, flags);
1015        de_stop_rxtx(de);
1016        spin_unlock_irqrestore(&de->lock, flags);
1017        de_set_media(de);
1018        de_start_rxtx(de);
1019
1020no_link_yet:
1021        de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1022        add_timer(&de->media_timer);
1023
1024        if (netif_msg_timer(de))
1025                printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1026                       dev->name, media_name[de->media_type], status);
1027}
1028
1029static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1030{
1031        switch (new_media) {
1032        case DE_MEDIA_TP_AUTO:
1033                if (!(de->media_advertise & ADVERTISED_Autoneg))
1034                        return 0;
1035                if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1036                        return 0;
1037                break;
1038        case DE_MEDIA_BNC:
1039                if (!(de->media_advertise & ADVERTISED_BNC))
1040                        return 0;
1041                break;
1042        case DE_MEDIA_AUI:
1043                if (!(de->media_advertise & ADVERTISED_AUI))
1044                        return 0;
1045                break;
1046        case DE_MEDIA_TP:
1047                if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1048                        return 0;
1049                break;
1050        case DE_MEDIA_TP_FD:
1051                if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1052                        return 0;
1053                break;
1054        }
1055
1056        return 1;
1057}
1058
1059static void de21041_media_timer (unsigned long data)
1060{
1061        struct de_private *de = (struct de_private *) data;
1062        struct net_device *dev = de->dev;
1063        u32 status = dr32(SIAStatus);
1064        unsigned int carrier;
1065        unsigned long flags;
1066
1067        carrier = (status & NetCxnErr) ? 0 : 1;
1068
1069        if (carrier) {
1070                if ((de->media_type == DE_MEDIA_TP_AUTO ||
1071                     de->media_type == DE_MEDIA_TP ||
1072                     de->media_type == DE_MEDIA_TP_FD) &&
1073                    (status & LinkFailStatus))
1074                        goto no_link_yet;
1075
1076                de->media_timer.expires = jiffies + DE_TIMER_LINK;
1077                add_timer(&de->media_timer);
1078                if (!netif_carrier_ok(dev))
1079                        de_link_up(de);
1080                else
1081                        if (netif_msg_timer(de))
1082                                printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
1083                                       dev->name, media_name[de->media_type],
1084                                       dr32(MacMode), status);
1085                return;
1086        }
1087
1088        de_link_down(de);
1089
1090        /* if media type locked, don't switch media */
1091        if (de->media_lock)
1092                goto set_media;
1093
1094        /* if activity detected, use that as hint for new media type */
1095        if (status & NonselPortActive) {
1096                unsigned int have_media = 1;
1097
1098                /* if AUI/BNC selected, then activity is on TP port */
1099                if (de->media_type == DE_MEDIA_AUI ||
1100                    de->media_type == DE_MEDIA_BNC) {
1101                        if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1102                                de->media_type = DE_MEDIA_TP_AUTO;
1103                        else
1104                                have_media = 0;
1105                }
1106
1107                /* TP selected.  If there is only TP and BNC, then it's BNC */
1108                else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1109                         de_ok_to_advertise(de, DE_MEDIA_BNC))
1110                        de->media_type = DE_MEDIA_BNC;
1111
1112                /* TP selected.  If there is only TP and AUI, then it's AUI */
1113                else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1114                         de_ok_to_advertise(de, DE_MEDIA_AUI))
1115                        de->media_type = DE_MEDIA_AUI;
1116
1117                /* otherwise, ignore the hint */
1118                else
1119                        have_media = 0;
1120
1121                if (have_media)
1122                        goto set_media;
1123        }
1124
1125        /*
1126         * Absent or ambiguous activity hint, move to next advertised
1127         * media state.  If de->media_type is left unchanged, this
1128         * simply resets the PHY and reloads the current media settings.
1129         */
1130        if (de->media_type == DE_MEDIA_AUI) {
1131                u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1132                de_next_media(de, next_states, ARRAY_SIZE(next_states));
1133        } else if (de->media_type == DE_MEDIA_BNC) {
1134                u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1135                de_next_media(de, next_states, ARRAY_SIZE(next_states));
1136        } else {
1137                u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1138                de_next_media(de, next_states, ARRAY_SIZE(next_states));
1139        }
1140
1141set_media:
1142        spin_lock_irqsave(&de->lock, flags);
1143        de_stop_rxtx(de);
1144        spin_unlock_irqrestore(&de->lock, flags);
1145        de_set_media(de);
1146        de_start_rxtx(de);
1147
1148no_link_yet:
1149        de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1150        add_timer(&de->media_timer);
1151
1152        if (netif_msg_timer(de))
1153                printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1154                       dev->name, media_name[de->media_type], status);
1155}
1156
1157static void de_media_interrupt (struct de_private *de, u32 status)
1158{
1159        if (status & LinkPass) {
1160                de_link_up(de);
1161                mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1162                return;
1163        }
1164
1165        BUG_ON(!(status & LinkFail));
1166
1167        if (netif_carrier_ok(de->dev)) {
1168                de_link_down(de);
1169                mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1170        }
1171}
1172
1173static int de_reset_mac (struct de_private *de)
1174{
1175        u32 status, tmp;
1176
1177        /*
1178         * Reset MAC.  de4x5.c and tulip.c examined for "advice"
1179         * in this area.
1180         */
1181
1182        if (dr32(BusMode) == 0xffffffff)
1183                return -EBUSY;
1184
1185        /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1186        dw32 (BusMode, CmdReset);
1187        mdelay (1);
1188
1189        dw32 (BusMode, de_bus_mode);
1190        mdelay (1);
1191
1192        for (tmp = 0; tmp < 5; tmp++) {
1193                dr32 (BusMode);
1194                mdelay (1);
1195        }
1196
1197        mdelay (1);
1198
1199        status = dr32(MacStatus);
1200        if (status & (RxState | TxState))
1201                return -EBUSY;
1202        if (status == 0xffffffff)
1203                return -ENODEV;
1204        return 0;
1205}
1206
1207static void de_adapter_wake (struct de_private *de)
1208{
1209        u32 pmctl;
1210
1211        if (de->de21040)
1212                return;
1213
1214        pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1215        if (pmctl & PM_Mask) {
1216                pmctl &= ~PM_Mask;
1217                pci_write_config_dword(de->pdev, PCIPM, pmctl);
1218
1219                /* de4x5.c delays, so we do too */
1220                msleep(10);
1221        }
1222}
1223
1224static void de_adapter_sleep (struct de_private *de)
1225{
1226        u32 pmctl;
1227
1228        if (de->de21040)
1229                return;
1230
1231        pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1232        pmctl |= PM_Sleep;
1233        pci_write_config_dword(de->pdev, PCIPM, pmctl);
1234}
1235
1236static int de_init_hw (struct de_private *de)
1237{
1238        struct net_device *dev = de->dev;
1239        u32 macmode;
1240        int rc;
1241
1242        de_adapter_wake(de);
1243
1244        macmode = dr32(MacMode) & ~MacModeClear;
1245
1246        rc = de_reset_mac(de);
1247        if (rc)
1248                return rc;
1249
1250        de_set_media(de); /* reset phy */
1251
1252        dw32(RxRingAddr, de->ring_dma);
1253        dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1254
1255        dw32(MacMode, RxTx | macmode);
1256
1257        dr32(RxMissed); /* self-clearing */
1258
1259        dw32(IntrMask, de_intr_mask);
1260
1261        de_set_rx_mode(dev);
1262
1263        return 0;
1264}
1265
1266static int de_refill_rx (struct de_private *de)
1267{
1268        unsigned i;
1269
1270        for (i = 0; i < DE_RX_RING_SIZE; i++) {
1271                struct sk_buff *skb;
1272
1273                skb = dev_alloc_skb(de->rx_buf_sz);
1274                if (!skb)
1275                        goto err_out;
1276
1277                skb->dev = de->dev;
1278
1279                de->rx_skb[i].mapping = pci_map_single(de->pdev,
1280                        skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1281                de->rx_skb[i].skb = skb;
1282
1283                de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1284                if (i == (DE_RX_RING_SIZE - 1))
1285                        de->rx_ring[i].opts2 =
1286                                cpu_to_le32(RingEnd | de->rx_buf_sz);
1287                else
1288                        de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1289                de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1290                de->rx_ring[i].addr2 = 0;
1291        }
1292
1293        return 0;
1294
1295err_out:
1296        de_clean_rings(de);
1297        return -ENOMEM;
1298}
1299
1300static int de_init_rings (struct de_private *de)
1301{
1302        memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1303        de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1304
1305        de->rx_tail = 0;
1306        de->tx_head = de->tx_tail = 0;
1307
1308        return de_refill_rx (de);
1309}
1310
1311static int de_alloc_rings (struct de_private *de)
1312{
1313        de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1314        if (!de->rx_ring)
1315                return -ENOMEM;
1316        de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1317        return de_init_rings(de);
1318}
1319
1320static void de_clean_rings (struct de_private *de)
1321{
1322        unsigned i;
1323
1324        memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1325        de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1326        wmb();
1327        memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1328        de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1329        wmb();
1330
1331        for (i = 0; i < DE_RX_RING_SIZE; i++) {
1332                if (de->rx_skb[i].skb) {
1333                        pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1334                                         de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1335                        dev_kfree_skb(de->rx_skb[i].skb);
1336                }
1337        }
1338
1339        for (i = 0; i < DE_TX_RING_SIZE; i++) {
1340                struct sk_buff *skb = de->tx_skb[i].skb;
1341                if ((skb) && (skb != DE_DUMMY_SKB)) {
1342                        if (skb != DE_SETUP_SKB) {
1343                                de->net_stats.tx_dropped++;
1344                                pci_unmap_single(de->pdev,
1345                                        de->tx_skb[i].mapping,
1346                                        skb->len, PCI_DMA_TODEVICE);
1347                                dev_kfree_skb(skb);
1348                        } else {
1349                                pci_unmap_single(de->pdev,
1350                                        de->tx_skb[i].mapping,
1351                                        sizeof(de->setup_frame),
1352                                        PCI_DMA_TODEVICE);
1353                        }
1354                }
1355        }
1356
1357        memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1358        memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1359}
1360
1361static void de_free_rings (struct de_private *de)
1362{
1363        de_clean_rings(de);
1364        pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1365        de->rx_ring = NULL;
1366        de->tx_ring = NULL;
1367}
1368
1369static int de_open (struct net_device *dev)
1370{
1371        struct de_private *de = netdev_priv(dev);
1372        int rc;
1373
1374        if (netif_msg_ifup(de))
1375                printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1376
1377        de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1378
1379        rc = de_alloc_rings(de);
1380        if (rc) {
1381                printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
1382                       dev->name, rc);
1383                return rc;
1384        }
1385
1386        dw32(IntrMask, 0);
1387
1388        rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1389        if (rc) {
1390                printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
1391                       dev->name, dev->irq, rc);
1392                goto err_out_free;
1393        }
1394
1395        rc = de_init_hw(de);
1396        if (rc) {
1397                printk(KERN_ERR "%s: h/w init failure, err=%d\n",
1398                       dev->name, rc);
1399                goto err_out_free_irq;
1400        }
1401
1402        netif_start_queue(dev);
1403        mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1404
1405        return 0;
1406
1407err_out_free_irq:
1408        free_irq(dev->irq, dev);
1409err_out_free:
1410        de_free_rings(de);
1411        return rc;
1412}
1413
1414static int de_close (struct net_device *dev)
1415{
1416        struct de_private *de = netdev_priv(dev);
1417        unsigned long flags;
1418
1419        if (netif_msg_ifdown(de))
1420                printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1421
1422        del_timer_sync(&de->media_timer);
1423
1424        spin_lock_irqsave(&de->lock, flags);
1425        de_stop_hw(de);
1426        netif_stop_queue(dev);
1427        netif_carrier_off(dev);
1428        spin_unlock_irqrestore(&de->lock, flags);
1429
1430        free_irq(dev->irq, dev);
1431
1432        de_free_rings(de);
1433        de_adapter_sleep(de);
1434        return 0;
1435}
1436
1437static void de_tx_timeout (struct net_device *dev)
1438{
1439        struct de_private *de = netdev_priv(dev);
1440
1441        printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1442               dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1443               de->rx_tail, de->tx_head, de->tx_tail);
1444
1445        del_timer_sync(&de->media_timer);
1446
1447        disable_irq(dev->irq);
1448        spin_lock_irq(&de->lock);
1449
1450        de_stop_hw(de);
1451        netif_stop_queue(dev);
1452        netif_carrier_off(dev);
1453
1454        spin_unlock_irq(&de->lock);
1455        enable_irq(dev->irq);
1456
1457        /* Update the error counts. */
1458        __de_get_stats(de);
1459
1460        synchronize_irq(dev->irq);
1461        de_clean_rings(de);
1462
1463        de_init_rings(de);
1464
1465        de_init_hw(de);
1466
1467        netif_wake_queue(dev);
1468}
1469
1470static void __de_get_regs(struct de_private *de, u8 *buf)
1471{
1472        int i;
1473        u32 *rbuf = (u32 *)buf;
1474
1475        /* read all CSRs */
1476        for (i = 0; i < DE_NUM_REGS; i++)
1477                rbuf[i] = dr32(i * 8);
1478
1479        /* handle self-clearing RxMissed counter, CSR8 */
1480        de_rx_missed(de, rbuf[8]);
1481}
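
/*
 * An illustrative sketch (not part of the driver): the 21040/21041 CSRs
 * sit on 64-bit boundaries, so CSRn lives at byte offset n * 8; that is
 * why the loop above reads dr32(i * 8) and why rbuf[8] holds the RxMissed
 * counter (CSR8).
 */
#if 0	/* example only, not compiled */
static void de_csr_offset_example(void)
{
	BUILD_BUG_ON(MacMode != 6 * 8);		/* CSR6, the mode register */
	BUILD_BUG_ON(RxMissed != 8 * 8);	/* CSR8, missed-frame counter */
}
#endif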
1482
1483static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1484{
1485        ecmd->supported = de->media_supported;
1486        ecmd->transceiver = XCVR_INTERNAL;
1487        ecmd->phy_address = 0;
1488        ecmd->advertising = de->media_advertise;
1489
1490        switch (de->media_type) {
1491        case DE_MEDIA_AUI:
1492                ecmd->port = PORT_AUI;
1493                ecmd->speed = 5;
1494                break;
1495        case DE_MEDIA_BNC:
1496                ecmd->port = PORT_BNC;
1497                ecmd->speed = 2;
1498                break;
1499        default:
1500                ecmd->port = PORT_TP;
1501                ecmd->speed = SPEED_10;
1502                break;
1503        }
1504
1505        if (dr32(MacMode) & FullDuplex)
1506                ecmd->duplex = DUPLEX_FULL;
1507        else
1508                ecmd->duplex = DUPLEX_HALF;
1509
1510        if (de->media_lock)
1511                ecmd->autoneg = AUTONEG_DISABLE;
1512        else
1513                ecmd->autoneg = AUTONEG_ENABLE;
1514
1515        /* ignore maxtxpkt, maxrxpkt for now */
1516
1517        return 0;
1518}
1519
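/* Validate the requested link settings (the 21040 variant has no BNC
 * port, and only the pseudo-speeds 10/5/2 used for TP/AUI/BNC are
 * accepted), map the port/duplex/autoneg triple onto a DE_MEDIA_*
 * type, and reprogram the media only if something actually changed.
 */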
1520static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1521{
1522        u32 new_media;
1523        unsigned int media_lock;
1524
1525        if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1526                return -EINVAL;
1527        if (de->de21040 && ecmd->speed == 2)
1528                return -EINVAL;
1529        if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1530                return -EINVAL;
1531        if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1532                return -EINVAL;
1533        if (de->de21040 && ecmd->port == PORT_BNC)
1534                return -EINVAL;
1535        if (ecmd->transceiver != XCVR_INTERNAL)
1536                return -EINVAL;
1537        if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1538                return -EINVAL;
1539        if (ecmd->advertising & ~de->media_supported)
1540                return -EINVAL;
1541        if (ecmd->autoneg == AUTONEG_ENABLE &&
1542            (!(ecmd->advertising & ADVERTISED_Autoneg)))
1543                return -EINVAL;
1544
1545        switch (ecmd->port) {
1546        case PORT_AUI:
1547                new_media = DE_MEDIA_AUI;
1548                if (!(ecmd->advertising & ADVERTISED_AUI))
1549                        return -EINVAL;
1550                break;
1551        case PORT_BNC:
1552                new_media = DE_MEDIA_BNC;
1553                if (!(ecmd->advertising & ADVERTISED_BNC))
1554                        return -EINVAL;
1555                break;
1556        default:
1557                if (ecmd->autoneg == AUTONEG_ENABLE)
1558                        new_media = DE_MEDIA_TP_AUTO;
1559                else if (ecmd->duplex == DUPLEX_FULL)
1560                        new_media = DE_MEDIA_TP_FD;
1561                else
1562                        new_media = DE_MEDIA_TP;
1563                if (!(ecmd->advertising & ADVERTISED_TP))
1564                        return -EINVAL;
1565                if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1566                        return -EINVAL;
1567                break;
1568        }
1569
1570        media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1571
1572        if ((new_media == de->media_type) &&
1573            (media_lock == de->media_lock) &&
1574            (ecmd->advertising == de->media_advertise))
1575                return 0; /* nothing to change */
1576
1577        de_link_down(de);
1578        de_stop_rxtx(de);
1579
1580        de->media_type = new_media;
1581        de->media_lock = media_lock;
1582        de->media_advertise = ecmd->advertising;
1583        de_set_media(de);
1584
1585        return 0;
1586}
1587
1588static void de_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1589{
1590        struct de_private *de = netdev_priv(dev);
1591
1592        strlcpy (info->driver, DRV_NAME, sizeof(info->driver));
1593        strlcpy (info->version, DRV_VERSION, sizeof(info->version));
1594        strlcpy (info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1595        info->eedump_len = DE_EEPROM_SIZE;
1596}
1597
1598static int de_get_regs_len(struct net_device *dev)
1599{
1600        return DE_REGS_SIZE;
1601}
1602
1603static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1604{
1605        struct de_private *de = netdev_priv(dev);
1606        int rc;
1607
1608        spin_lock_irq(&de->lock);
1609        rc = __de_get_settings(de, ecmd);
1610        spin_unlock_irq(&de->lock);
1611
1612        return rc;
1613}
1614
1615static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1616{
1617        struct de_private *de = netdev_priv(dev);
1618        int rc;
1619
1620        spin_lock_irq(&de->lock);
1621        rc = __de_set_settings(de, ecmd);
1622        spin_unlock_irq(&de->lock);
1623
1624        return rc;
1625}
1626
1627static u32 de_get_msglevel(struct net_device *dev)
1628{
1629        struct de_private *de = netdev_priv(dev);
1630
1631        return de->msg_enable;
1632}
1633
1634static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1635{
1636        struct de_private *de = netdev_priv(dev);
1637
1638        de->msg_enable = msglvl;
1639}
1640
1641static int de_get_eeprom(struct net_device *dev,
1642                         struct ethtool_eeprom *eeprom, u8 *data)
1643{
1644        struct de_private *de = netdev_priv(dev);
1645
1646        if (!de->ee_data)
1647                return -EOPNOTSUPP;
1648        if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1649            (eeprom->len != DE_EEPROM_SIZE))
1650                return -EINVAL;
1651        memcpy(data, de->ee_data, eeprom->len);
1652
1653        return 0;
1654}
1655
1656static int de_nway_reset(struct net_device *dev)
1657{
1658        struct de_private *de = netdev_priv(dev);
1659        u32 status;
1660
1661        if (de->media_type != DE_MEDIA_TP_AUTO)
1662                return -EINVAL;
1663        if (netif_carrier_ok(de->dev))
1664                de_link_down(de);
1665
1666        status = dr32(SIAStatus);
1667        dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1668        if (netif_msg_link(de))
1669                printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
1670                       de->dev->name, status, dr32(SIAStatus));
1671        return 0;
1672}
1673
1674static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1675                        void *data)
1676{
1677        struct de_private *de = netdev_priv(dev);
1678
1679        regs->version = (DE_REGS_VER << 2) | de->de21040;
1680
1681        spin_lock_irq(&de->lock);
1682        __de_get_regs(de, data);
1683        spin_unlock_irq(&de->lock);
1684}
1685
1686static const struct ethtool_ops de_ethtool_ops = {
1687        .get_link               = ethtool_op_get_link,
1688        .get_drvinfo            = de_get_drvinfo,
1689        .get_regs_len           = de_get_regs_len,
1690        .get_settings           = de_get_settings,
1691        .set_settings           = de_set_settings,
1692        .get_msglevel           = de_get_msglevel,
1693        .set_msglevel           = de_set_msglevel,
1694        .get_eeprom             = de_get_eeprom,
1695        .nway_reset             = de_nway_reset,
1696        .get_regs               = de_get_regs,
1697};
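/* These ops are reached through the normal ethtool ioctl path; as a
 * rough illustration (not verified against any particular ethtool
 * version), "ethtool ethX" should end up in de_get_settings() and
 * "ethtool -e ethX" in de_get_eeprom(), which only honours a dump of
 * the full DE_EEPROM_SIZE starting at offset 0.
 */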
1698
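/* On the 21040 the station address is read one byte at a time through
 * the ROMCmd CSR: a dummy write resets the chip's internal pointer and
 * each read then returns the next address byte.  The loop below spins
 * while the register's top bit is still set (the "value < 0" test),
 * which the code treats as "data not yet valid".
 */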
1699static void __devinit de21040_get_mac_address (struct de_private *de)
1700{
1701        unsigned i;
1702
1703        dw32 (ROMCmd, 0);       /* Reset the pointer with a dummy write. */
1704        udelay(5);
1705
1706        for (i = 0; i < 6; i++) {
1707                int value, boguscnt = 100000;
1708                do {
1709                        value = dr32(ROMCmd);
1710                } while (value < 0 && --boguscnt > 0);
1711                de->dev->dev_addr[i] = value;
1712                udelay(1);
1713                if (boguscnt <= 0)
1714                        printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
1715        }
1716}
1717
1718static void __devinit de21040_get_media_info(struct de_private *de)
1719{
1720        unsigned int i;
1721
1722        de->media_type = DE_MEDIA_TP;
1723        de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1724                               SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1725        de->media_advertise = de->media_supported;
1726
1727        for (i = 0; i < DE_MAX_MEDIA; i++) {
1728                switch (i) {
1729                case DE_MEDIA_AUI:
1730                case DE_MEDIA_TP:
1731                case DE_MEDIA_TP_FD:
1732                        de->media[i].type = i;
1733                        de->media[i].csr13 = t21040_csr13[i];
1734                        de->media[i].csr14 = t21040_csr14[i];
1735                        de->media[i].csr15 = t21040_csr15[i];
1736                        break;
1737                default:
1738                        de->media[i].type = DE_MEDIA_INVALID;
1739                        break;
1740                }
1741        }
1742}
1743
1744/* Note: this routine returns extra data bits for size detection. */
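/* The SROM is a small MicroWire-style (93Cxx) serial EEPROM bit-banged
 * through the ROMCmd CSR: raise chip select, shift the read command
 * (start/opcode bits plus the word address) out MSB first, then clock
 * in the 16 data bits, sampling EE_DATA_READ after each rising edge of
 * EE_SHIFT_CLK.
 */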
1745static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1746{
1747        int i;
1748        unsigned retval = 0;
1749        void __iomem *ee_addr = regs + ROMCmd;
1750        int read_cmd = location | (EE_READ_CMD << addr_len);
1751
1752        writel(EE_ENB & ~EE_CS, ee_addr);
1753        writel(EE_ENB, ee_addr);
1754
1755        /* Shift the read command bits out. */
1756        for (i = 4 + addr_len; i >= 0; i--) {
1757                short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1758                writel(EE_ENB | dataval, ee_addr);
1759                readl(ee_addr);
1760                writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1761                readl(ee_addr);
1762                retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1763        }
1764        writel(EE_ENB, ee_addr);
1765        readl(ee_addr);
1766
1767        for (i = 16; i > 0; i--) {
1768                writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1769                readl(ee_addr);
1770                retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1771                writel(EE_ENB, ee_addr);
1772                readl(ee_addr);
1773        }
1774
1775        /* Terminate the EEPROM access. */
1776        writel(EE_ENB & ~EE_CS, ee_addr);
1777        return retval;
1778}
1779
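/* Pull the whole SROM into ee_data and parse it: the station address
 * (at offset 0 on early boards, offset 20 in the later SROM format),
 * the controller-0 info leaf with the default media, and the per-media
 * blocks which may carry custom CSR13/14/15 values.  A malformed SROM
 * falls back to "support everything" plus the 21041 default CSR tables.
 */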
1780static void __devinit de21041_get_srom_info (struct de_private *de)
1781{
1782        unsigned i, sa_offset = 0, ofs;
1783        u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1784        unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1785        struct de_srom_info_leaf *il;
1786        void *bufp;
1787
1788        /* download entire eeprom */
1789        for (i = 0; i < DE_EEPROM_WORDS; i++)
1790                ((__le16 *)ee_data)[i] =
1791                        cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1792
1793        /* DEC now has a specification but early board makers
1794           just put the address in the first EEPROM locations. */
1795        /* This is effectively memcmp(ee_data, ee_data + 16, 8) != 0 */
1796
1797#ifndef CONFIG_MIPS_COBALT
1798
1799        for (i = 0; i < 8; i ++)
1800                if (ee_data[i] != ee_data[16+i])
1801                        sa_offset = 20;
1802
1803#endif
1804
1805        /* store MAC address */
1806        for (i = 0; i < 6; i ++)
1807                de->dev->dev_addr[i] = ee_data[i + sa_offset];
1808
1809        /* get offset of controller 0 info leaf.  ignore 2nd byte. */
1810        ofs = ee_data[SROMC0InfoLeaf];
1811        if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1812                goto bad_srom;
1813
1814        /* get pointer to info leaf */
1815        il = (struct de_srom_info_leaf *) &ee_data[ofs];
1816
1817        /* paranoia checks */
1818        if (il->n_blocks == 0)
1819                goto bad_srom;
1820        if ((sizeof(ee_data) - ofs) <
1821            (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1822                goto bad_srom;
1823
1824        /* get default media type */
1825        switch (get_unaligned(&il->default_media)) {
1826        case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
1827        case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
1828        case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
1829        default: de->media_type = DE_MEDIA_TP_AUTO; break;
1830        }
1831
1832        if (netif_msg_probe(de))
1833                printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
1834                       de->board_idx, ofs,
1835                       media_name[de->media_type]);
1836
1837        /* init SIA register values to defaults */
1838        for (i = 0; i < DE_MAX_MEDIA; i++) {
1839                de->media[i].type = DE_MEDIA_INVALID;
1840                de->media[i].csr13 = 0xffff;
1841                de->media[i].csr14 = 0xffff;
1842                de->media[i].csr15 = 0xffff;
1843        }
1844
1845        /* parse media blocks to see what medias are supported,
1846         * and if any custom CSR values are provided
1847         */
1848        bufp = ((void *)il) + sizeof(*il);
1849        for (i = 0; i < il->n_blocks; i++) {
1850                struct de_srom_media_block *ib = bufp;
1851                unsigned idx;
1852
1853                /* index based on media type in media block */
1854                switch(ib->opts & MediaBlockMask) {
1855                case 0: /* 10baseT */
1856                        de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1857                                          | SUPPORTED_Autoneg;
1858                        idx = DE_MEDIA_TP;
1859                        de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1860                        break;
1861                case 1: /* BNC */
1862                        de->media_supported |= SUPPORTED_BNC;
1863                        idx = DE_MEDIA_BNC;
1864                        break;
1865                case 2: /* AUI */
1866                        de->media_supported |= SUPPORTED_AUI;
1867                        idx = DE_MEDIA_AUI;
1868                        break;
1869                case 4: /* 10baseT-FD */
1870                        de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1871                                          | SUPPORTED_Autoneg;
1872                        idx = DE_MEDIA_TP_FD;
1873                        de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1874                        break;
1875                default:
1876                        goto bad_srom;
1877                }
1878
1879                de->media[idx].type = idx;
1880
1881                if (netif_msg_probe(de))
1882                        printk(KERN_INFO "de%d:   media block #%u: %s",
1883                               de->board_idx, i,
1884                               media_name[de->media[idx].type]);
1885
1886                bufp += sizeof (ib->opts);
1887
1888                if (ib->opts & MediaCustomCSRs) {
1889                        de->media[idx].csr13 = get_unaligned(&ib->csr13);
1890                        de->media[idx].csr14 = get_unaligned(&ib->csr14);
1891                        de->media[idx].csr15 = get_unaligned(&ib->csr15);
1892                        bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1893                                sizeof(ib->csr15);
1894
1895                        if (netif_msg_probe(de))
1896                                printk(" (%x,%x,%x)\n",
1897                                       de->media[idx].csr13,
1898                                       de->media[idx].csr14,
1899                                       de->media[idx].csr15);
1900
1901                } else if (netif_msg_probe(de))
1902                        printk("\n");
1903
1904                if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1905                        break;
1906        }
1907
1908        de->media_advertise = de->media_supported;
1909
1910fill_defaults:
1911        /* fill in defaults, for cases where custom CSRs not used */
1912        for (i = 0; i < DE_MAX_MEDIA; i++) {
1913                if (de->media[i].csr13 == 0xffff)
1914                        de->media[i].csr13 = t21041_csr13[i];
1915                if (de->media[i].csr14 == 0xffff)
1916                        de->media[i].csr14 = t21041_csr14[i];
1917                if (de->media[i].csr15 == 0xffff)
1918                        de->media[i].csr15 = t21041_csr15[i];
1919        }
1920
1921        de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1922
1923        return;
1924
1925bad_srom:
1926        /* for error cases, it's ok to assume we support all these */
1927        for (i = 0; i < DE_MAX_MEDIA; i++)
1928                de->media[i].type = i;
1929        de->media_supported =
1930                SUPPORTED_10baseT_Half |
1931                SUPPORTED_10baseT_Full |
1932                SUPPORTED_Autoneg |
1933                SUPPORTED_TP |
1934                SUPPORTED_AUI |
1935                SUPPORTED_BNC;
1936        goto fill_defaults;
1937}
1938
1939static const struct net_device_ops de_netdev_ops = {
1940        .ndo_open               = de_open,
1941        .ndo_stop               = de_close,
1942        .ndo_set_multicast_list = de_set_rx_mode,
1943        .ndo_start_xmit         = de_start_xmit,
1944        .ndo_get_stats          = de_get_stats,
1945        .ndo_tx_timeout         = de_tx_timeout,
1946        .ndo_change_mtu         = eth_change_mtu,
1947        .ndo_set_mac_address    = eth_mac_addr,
1948        .ndo_validate_addr      = eth_validate_addr,
1949};
1950
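/* PCI probe: allocate and initialize the netdev, claim the PCI
 * resources, map the CSR window from BAR 1, wake the adapter and reset
 * the MAC, pick up the MAC address and media list (chip registers on
 * the 21040, SROM on the 21041), register the interface, and finally
 * put the adapter back to sleep until it is opened.
 */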
1951static int __devinit de_init_one (struct pci_dev *pdev,
1952                                  const struct pci_device_id *ent)
1953{
1954        struct net_device *dev;
1955        struct de_private *de;
1956        int rc;
1957        void __iomem *regs;
1958        unsigned long pciaddr;
1959        static int board_idx = -1;
1960
1961        board_idx++;
1962
1963#ifndef MODULE
1964        if (board_idx == 0)
1965                printk("%s", version);
1966#endif
1967
1968        /* allocate a new ethernet device structure, and fill in defaults */
1969        dev = alloc_etherdev(sizeof(struct de_private));
1970        if (!dev)
1971                return -ENOMEM;
1972
1973        dev->netdev_ops = &de_netdev_ops;
1974        SET_NETDEV_DEV(dev, &pdev->dev);
1975        dev->ethtool_ops = &de_ethtool_ops;
1976        dev->watchdog_timeo = TX_TIMEOUT;
1977
1978        de = netdev_priv(dev);
1979        de->de21040 = ent->driver_data == 0 ? 1 : 0;
1980        de->pdev = pdev;
1981        de->dev = dev;
1982        de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1983        de->board_idx = board_idx;
1984        spin_lock_init (&de->lock);
1985        init_timer(&de->media_timer);
1986        if (de->de21040)
1987                de->media_timer.function = de21040_media_timer;
1988        else
1989                de->media_timer.function = de21041_media_timer;
1990        de->media_timer.data = (unsigned long) de;
1991
1992        netif_carrier_off(dev);
1993        netif_stop_queue(dev);
1994
1995        /* wake up device, assign resources */
1996        rc = pci_enable_device(pdev);
1997        if (rc)
1998                goto err_out_free;
1999
2000        /* reserve PCI resources to ensure driver atomicity */
2001        rc = pci_request_regions(pdev, DRV_NAME);
2002        if (rc)
2003                goto err_out_disable;
2004
2005        /* check for invalid IRQ value */
2006        if (pdev->irq < 2) {
2007                rc = -EIO;
2008                printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
2009                       pdev->irq, pci_name(pdev));
2010                goto err_out_res;
2011        }
2012
2013        dev->irq = pdev->irq;
2014
2015        /* obtain and check validity of PCI I/O address */
2016        pciaddr = pci_resource_start(pdev, 1);
2017        if (!pciaddr) {
2018                rc = -EIO;
2019                printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
2020                       pci_name(pdev));
2021                goto err_out_res;
2022        }
2023        if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2024                rc = -EIO;
2025                printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
2026                       (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
2027                goto err_out_res;
2028        }
2029
2030        /* remap CSR registers */
2031        regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2032        if (!regs) {
2033                rc = -EIO;
2034                printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2035                        (unsigned long long)pci_resource_len(pdev, 1),
2036                        pciaddr, pci_name(pdev));
2037                goto err_out_res;
2038        }
2039        dev->base_addr = (unsigned long) regs;
2040        de->regs = regs;
2041
2042        de_adapter_wake(de);
2043
2044        /* make sure hardware is not running */
2045        rc = de_reset_mac(de);
2046        if (rc) {
2047                printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
2048                       pci_name(pdev));
2049                goto err_out_iomap;
2050        }
2051
2052        /* get MAC address, initialize default media type and
2053         * get list of supported media
2054         */
2055        if (de->de21040) {
2056                de21040_get_mac_address(de);
2057                de21040_get_media_info(de);
2058        } else {
2059                de21041_get_srom_info(de);
2060        }
2061
2062        /* register new network interface with kernel */
2063        rc = register_netdev(dev);
2064        if (rc)
2065                goto err_out_iomap;
2066
2067        /* print info about board and interface just registered */
2068        printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
2069                dev->name,
2070                de->de21040 ? "21040" : "21041",
2071                dev->base_addr,
2072                dev->dev_addr,
2073                dev->irq);
2074
2075        pci_set_drvdata(pdev, dev);
2076
2077        /* enable busmastering */
2078        pci_set_master(pdev);
2079
2080        /* put adapter to sleep */
2081        de_adapter_sleep(de);
2082
2083        return 0;
2084
2085err_out_iomap:
2086        kfree(de->ee_data);
2087        iounmap(regs);
2088err_out_res:
2089        pci_release_regions(pdev);
2090err_out_disable:
2091        pci_disable_device(pdev);
2092err_out_free:
2093        free_netdev(dev);
2094        return rc;
2095}
2096
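/* PCI remove: tear down in roughly the reverse order of de_init_one(). */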
2097static void __devexit de_remove_one (struct pci_dev *pdev)
2098{
2099        struct net_device *dev = pci_get_drvdata(pdev);
2100        struct de_private *de = netdev_priv(dev);
2101
2102        BUG_ON(!dev);
2103        unregister_netdev(dev);
2104        kfree(de->ee_data);
2105        iounmap(de->regs);
2106        pci_release_regions(pdev);
2107        pci_disable_device(pdev);
2108        pci_set_drvdata(pdev, NULL);
2109        free_netdev(dev);
2110}
2111
2112#ifdef CONFIG_PM
2113
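/* Legacy PCI power-management hooks, serialized against the stack via
 * rtnl_lock().  Suspend detaches the interface and, if it was running,
 * quiesces the hardware much as de_close() does, drains the rings and
 * disables the PCI device.
 */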
2114static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2115{
2116        struct net_device *dev = pci_get_drvdata (pdev);
2117        struct de_private *de = netdev_priv(dev);
2118
2119        rtnl_lock();
2120        if (netif_running (dev)) {
2121                del_timer_sync(&de->media_timer);
2122
2123                disable_irq(dev->irq);
2124                spin_lock_irq(&de->lock);
2125
2126                de_stop_hw(de);
2127                netif_stop_queue(dev);
2128                netif_device_detach(dev);
2129                netif_carrier_off(dev);
2130
2131                spin_unlock_irq(&de->lock);
2132                enable_irq(dev->irq);
2133
2134                /* Update the error counts. */
2135                __de_get_stats(de);
2136
2137                synchronize_irq(dev->irq);
2138                de_clean_rings(de);
2139
2140                de_adapter_sleep(de);
2141                pci_disable_device(pdev);
2142        } else {
2143                netif_device_detach(dev);
2144        }
2145        rtnl_unlock();
2146        return 0;
2147}
2148
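/* Resume re-enables the PCI device and, for an interface that was
 * running, re-runs de_init_hw() before reattaching it to the stack.
 */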
2149static int de_resume (struct pci_dev *pdev)
2150{
2151        struct net_device *dev = pci_get_drvdata (pdev);
2152        struct de_private *de = netdev_priv(dev);
2153        int retval = 0;
2154
2155        rtnl_lock();
2156        if (netif_device_present(dev))
2157                goto out;
2158        if (!netif_running(dev))
2159                goto out_attach;
2160        if ((retval = pci_enable_device(pdev))) {
2161                printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
2162                        dev->name);
2163                goto out;
2164        }
2165        de_init_hw(de);
2166out_attach:
2167        netif_device_attach(dev);
2168out:
2169        rtnl_unlock();
2170        return retval;
2171}
2172
2173#endif /* CONFIG_PM */
2174
2175static struct pci_driver de_driver = {
2176        .name           = DRV_NAME,
2177        .id_table       = de_pci_tbl,
2178        .probe          = de_init_one,
2179        .remove         = __devexit_p(de_remove_one),
2180#ifdef CONFIG_PM
2181        .suspend        = de_suspend,
2182        .resume         = de_resume,
2183#endif
2184};
2185
2186static int __init de_init (void)
2187{
2188#ifdef MODULE
2189        printk("%s", version);
2190#endif
2191        return pci_register_driver(&de_driver);
2192}
2193
2194static void __exit de_exit (void)
2195{
2196        pci_unregister_driver (&de_driver);
2197}
2198
2199module_init(de_init);
2200module_exit(de_exit);
2201