linux/drivers/net/ethernet/rdc/r6040.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDC R6040 Fast Ethernet MAC support
 *
 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
 * Copyright (C) 2007
 *      Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
 * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/phy.h>

#include <asm/processor.h>

#define DRV_NAME        "r6040"
#define DRV_VERSION     "0.29"
#define DRV_RELDATE     "04Jul2016"

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (6000 * HZ / 1000)

/* RDC MAC I/O Size */
#define R6040_IO_SIZE   256

/* MAX RDC MAC */
#define MAX_MAC         2

/* MAC registers */
#define MCR0            0x00    /* Control register 0 */
#define  MCR0_RCVEN     0x0002  /* Receive enable */
#define  MCR0_PROMISC   0x0020  /* Promiscuous mode */
#define  MCR0_HASH_EN   0x0100  /* Enable multicast hash table function */
#define  MCR0_XMTEN     0x1000  /* Transmission enable */
#define  MCR0_FD        0x8000  /* Full/Half duplex */
#define MCR1            0x04    /* Control register 1 */
#define  MAC_RST        0x0001  /* Reset the MAC */
#define MBCR            0x08    /* Bus control */
#define MT_ICR          0x0C    /* TX interrupt control */
#define MR_ICR          0x10    /* RX interrupt control */
#define MTPR            0x14    /* TX poll command register */
#define  TM2TX          0x0001  /* Trigger MAC to transmit */
#define MR_BSR          0x18    /* RX buffer size */
#define MR_DCR          0x1A    /* RX descriptor control */
#define MLSR            0x1C    /* Last status */
#define  TX_FIFO_UNDR   0x0200  /* TX FIFO under-run */
#define  TX_EXCEEDC     0x2000  /* Transmit exceed collision */
#define  TX_LATEC       0x4000  /* Transmit late collision */
#define MMDIO           0x20    /* MDIO control register */
#define  MDIO_WRITE     0x4000  /* MDIO write */
#define  MDIO_READ      0x2000  /* MDIO read */
#define MMRD            0x24    /* MDIO read data register */
#define MMWD            0x28    /* MDIO write data register */
#define MTD_SA0         0x2C    /* TX descriptor start address 0 */
#define MTD_SA1         0x30    /* TX descriptor start address 1 */
#define MRD_SA0         0x34    /* RX descriptor start address 0 */
#define MRD_SA1         0x38    /* RX descriptor start address 1 */
#define MISR            0x3C    /* Status register */
#define MIER            0x40    /* INT enable register */
#define  MSK_INT        0x0000  /* Mask off interrupts */
#define  RX_FINISH      0x0001  /* RX finished */
#define  RX_NO_DESC     0x0002  /* No RX descriptor available */
#define  RX_FIFO_FULL   0x0004  /* RX FIFO full */
#define  RX_EARLY       0x0008  /* RX early */
#define  TX_FINISH      0x0010  /* TX finished */
#define  TX_EARLY       0x0080  /* TX early */
#define  EVENT_OVRFL    0x0100  /* Event counter overflow */
#define  LINK_CHANGED   0x0200  /* PHY link changed */
#define ME_CISR         0x44    /* Event counter INT status */
#define ME_CIER         0x48    /* Event counter INT enable */
#define MR_CNT          0x50    /* Successfully received packet counter */
#define ME_CNT0         0x52    /* Event counter 0 */
#define ME_CNT1         0x54    /* Event counter 1 */
#define ME_CNT2         0x56    /* Event counter 2 */
#define ME_CNT3         0x58    /* Event counter 3 */
#define MT_CNT          0x5A    /* Successfully transmit packet counter */
#define ME_CNT4         0x5C    /* Event counter 4 */
#define MP_CNT          0x5E    /* Pause frame counter register */
#define MAR0            0x60    /* Hash table 0 */
#define MAR1            0x62    /* Hash table 1 */
#define MAR2            0x64    /* Hash table 2 */
#define MAR3            0x66    /* Hash table 3 */
#define MID_0L          0x68    /* Multicast address MID0 Low */
#define MID_0M          0x6A    /* Multicast address MID0 Medium */
#define MID_0H          0x6C    /* Multicast address MID0 High */
#define MID_1L          0x70    /* MID1 Low */
#define MID_1M          0x72    /* MID1 Medium */
#define MID_1H          0x74    /* MID1 High */
#define MID_2L          0x78    /* MID2 Low */
#define MID_2M          0x7A    /* MID2 Medium */
#define MID_2H          0x7C    /* MID2 High */
#define MID_3L          0x80    /* MID3 Low */
#define MID_3M          0x82    /* MID3 Medium */
#define MID_3H          0x84    /* MID3 High */
#define PHY_CC          0x88    /* PHY status change configuration register */
#define  SCEN           0x8000  /* PHY status change enable */
#define  PHYAD_SHIFT    8       /* PHY address shift */
#define  TMRDIV_SHIFT   0       /* Timer divider shift */
#define PHY_ST          0x8A    /* PHY status register */
#define MAC_SM          0xAC    /* MAC status machine */
#define  MAC_SM_RST     0x0002  /* MAC status machine reset */
#define MD_CSC          0xb6    /* MDC speed control register */
#define  MD_CSC_DEFAULT 0x0030
#define MAC_ID          0xBE    /* Identifier register */

#define TX_DCNT         0x80    /* TX descriptor count */
#define RX_DCNT         0x80    /* RX descriptor count */
#define MAX_BUF_SIZE    0x600
#define RX_DESC_SIZE    (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE    (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT    0x012A  /* MAC Bus Control Register */
#define MCAST_MAX       3       /* Max number multicast addresses to filter */

#define MAC_DEF_TIMEOUT 2048    /* Default MAC read/write operation timeout */

/* Descriptor status */
#define DSC_OWNER_MAC   0x8000  /* MAC is the owner of this descriptor */
#define DSC_RX_OK       0x4000  /* RX was successful */
#define DSC_RX_ERR      0x0800  /* RX PHY error */
#define DSC_RX_ERR_DRI  0x0400  /* RX dribble packet */
#define DSC_RX_ERR_BUF  0x0200  /* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100  /* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT 0x0080  /* RX packet length < 64 byte */
#define DSC_RX_ERR_CRC  0x0040  /* RX CRC error */
#define DSC_RX_BCAST    0x0020  /* RX broadcast (no error) */
#define DSC_RX_MCAST    0x0010  /* RX multicast (no error) */
#define DSC_RX_MCH_HIT  0x0008  /* RX multicast hit in hash table (no error) */
#define DSC_RX_MIDH_HIT 0x0004  /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3   /* RX mask for the index of matched MIDx */
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>, "
        "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>, "
        "Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);

/* RX and TX interrupts that we handle */
#define RX_INTS                 (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS                 (TX_FINISH)
#define INT_MASK                (RX_INTS | TX_INTS)

struct r6040_descriptor {
        u16     status, len;            /* 0-3 */
        __le32  buf;                    /* 4-7 */
        __le32  ndesc;                  /* 8-B */
        u32     rev1;                   /* C-F */
        char    *vbufp;                 /* 10-13 */
        struct r6040_descriptor *vndescp;       /* 14-17 */
        struct sk_buff *skb_ptr;        /* 18-1B */
        u32     rev2;                   /* 1C-1F */
} __aligned(32);

struct r6040_private {
        spinlock_t lock;                /* driver lock */
        struct pci_dev *pdev;
        struct r6040_descriptor *rx_insert_ptr;
        struct r6040_descriptor *rx_remove_ptr;
        struct r6040_descriptor *tx_insert_ptr;
        struct r6040_descriptor *tx_remove_ptr;
        struct r6040_descriptor *rx_ring;
        struct r6040_descriptor *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        u16     tx_free_desc;
        u16     mcr0;
        struct net_device *dev;
        struct mii_bus *mii_bus;
        struct napi_struct napi;
        void __iomem *base;
        int old_link;
        int old_duplex;
};

static char version[] = DRV_NAME
        ": RDC R6040 NAPI net driver, "
        "version " DRV_VERSION " (" DRV_RELDATE ")";

/* Read a word of data from the PHY chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
        int limit = MAC_DEF_TIMEOUT;
        u16 cmd;

        iowrite16(MDIO_READ | reg | (phy_addr << 8), ioaddr + MMDIO);
        /* Wait for the read bit to be cleared */
        while (limit--) {
                cmd = ioread16(ioaddr + MMDIO);
                if (!(cmd & MDIO_READ))
                        break;
                udelay(1);
        }

        if (limit < 0)
                return -ETIMEDOUT;

        return ioread16(ioaddr + MMRD);
}

/* Write a word of data to the PHY chip */
static int r6040_phy_write(void __iomem *ioaddr,
                                        int phy_addr, int reg, u16 val)
{
        int limit = MAC_DEF_TIMEOUT;
        u16 cmd;

        iowrite16(val, ioaddr + MMWD);
        /* Write the command to the MDIO bus */
        iowrite16(MDIO_WRITE | reg | (phy_addr << 8), ioaddr + MMDIO);
        /* Wait for the write bit to be cleared */
        while (limit--) {
                cmd = ioread16(ioaddr + MMDIO);
                if (!(cmd & MDIO_WRITE))
                        break;
                udelay(1);
        }

        return (limit < 0) ? -ETIMEDOUT : 0;
}

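/* MDIO bus accessors registered with the mii_bus; they forward to the
 * MMDIO-based PHY read/write helpers above.
 */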
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
{
        struct net_device *dev = bus->priv;
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;

        return r6040_phy_read(ioaddr, phy_addr, reg);
}

static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
                                                int reg, u16 value)
{
        struct net_device *dev = bus->priv;
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;

        return r6040_phy_write(ioaddr, phy_addr, reg, value);
}

static void r6040_free_txbufs(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < TX_DCNT; i++) {
                if (lp->tx_insert_ptr->skb_ptr) {
                        dma_unmap_single(&lp->pdev->dev,
                                         le32_to_cpu(lp->tx_insert_ptr->buf),
                                         MAX_BUF_SIZE, DMA_TO_DEVICE);
                        dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
                        lp->tx_insert_ptr->skb_ptr = NULL;
                }
                lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
        }
}

static void r6040_free_rxbufs(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        int i;

        for (i = 0; i < RX_DCNT; i++) {
                if (lp->rx_insert_ptr->skb_ptr) {
                        dma_unmap_single(&lp->pdev->dev,
                                         le32_to_cpu(lp->rx_insert_ptr->buf),
                                         MAX_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
                        lp->rx_insert_ptr->skb_ptr = NULL;
                }
                lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
        }
}

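/* Chain a descriptor array into a ring: each entry's ndesc holds the DMA
 * address of the next descriptor and vndescp its virtual address; the last
 * entry points back to the first.
 */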
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
                                 dma_addr_t desc_dma, int size)
{
        struct r6040_descriptor *desc = desc_ring;
        dma_addr_t mapping = desc_dma;

        while (size-- > 0) {
                mapping += sizeof(*desc);
                desc->ndesc = cpu_to_le32(mapping);
                desc->vndescp = desc + 1;
                desc++;
        }
        desc--;
        desc->ndesc = cpu_to_le32(desc_dma);
        desc->vndescp = desc_ring;
}

static void r6040_init_txbufs(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);

        lp->tx_free_desc = TX_DCNT;

        lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
        r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
}

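/* Set up the RX ring and attach a freshly allocated, DMA-mapped skb to each
 * descriptor, handing ownership of the descriptors to the MAC.
 */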
static int r6040_alloc_rxbufs(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        struct r6040_descriptor *desc;
        struct sk_buff *skb;
        int rc;

        lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
        r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);

        /* Allocate skbs for the rx descriptors */
        desc = lp->rx_ring;
        do {
                skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
                if (!skb) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                desc->skb_ptr = skb;
                desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev,
                                                       desc->skb_ptr->data,
                                                       MAX_BUF_SIZE,
                                                       DMA_FROM_DEVICE));
                desc->status = DSC_OWNER_MAC;
                desc = desc->vndescp;
        } while (desc != lp->rx_ring);

        return 0;

err_exit:
        /* Deallocate all previously allocated skbs */
        r6040_free_rxbufs(dev);
        return rc;
}

static void r6040_reset_mac(struct r6040_private *lp)
{
        void __iomem *ioaddr = lp->base;
        int limit = MAC_DEF_TIMEOUT;
        u16 cmd, md_csc;

        md_csc = ioread16(ioaddr + MD_CSC);
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
                if (cmd & MAC_RST)
                        break;
        }

        /* Reset internal state machine */
        iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);

        /* Restore MDIO clock frequency */
        if (md_csc != MD_CSC_DEFAULT)
                iowrite16(md_csc, ioaddr + MD_CSC);
}

static void r6040_init_mac_regs(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;

        /* Mask Off Interrupt */
        iowrite16(MSK_INT, ioaddr + MIER);

        /* Reset RDC MAC */
        r6040_reset_mac(lp);

        /* MAC Bus Control Register */
        iowrite16(MBCR_DEFAULT, ioaddr + MBCR);

        /* Buffer Size Register */
        iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);

        /* Write TX ring start address */
        iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
        iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);

        /* Write RX ring start address */
        iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
        iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);

        /* Set interrupt waiting time and packet numbers */
        iowrite16(0, ioaddr + MT_ICR);
        iowrite16(0, ioaddr + MR_ICR);

        /* Enable interrupts */
        iowrite16(INT_MASK, ioaddr + MIER);

        /* Enable TX and RX */
        iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);

        /* Let TX poll the descriptors;
         * we may have been called by r6040_tx_timeout() which has left
         * some unsent tx buffers */
        iowrite16(TM2TX, ioaddr + MTPR);
}

static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct r6040_private *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;

        netdev_warn(dev, "transmit timed out, int enable %4.4x "
                "status %4.4x\n",
                ioread16(ioaddr + MIER),
                ioread16(ioaddr + MISR));

        dev->stats.tx_errors++;

        /* Reset MAC and re-init all registers */
        r6040_init_mac_regs(dev);
}

static struct net_device_stats *r6040_get_stats(struct net_device *dev)
{
        struct r6040_private *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->base;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
        dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
        spin_unlock_irqrestore(&priv->lock, flags);

        return &dev->stats;
}

/* Stop RDC MAC and Free the allocated resource */
static void r6040_down(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        u16 *adrp;

        /* Stop MAC */
        iowrite16(MSK_INT, ioaddr + MIER);      /* Mask Off Interrupt */

        /* Reset RDC MAC */
        r6040_reset_mac(lp);

        /* Restore MAC Address to MIDx */
        adrp = (u16 *) dev->dev_addr;
        iowrite16(adrp[0], ioaddr + MID_0L);
        iowrite16(adrp[1], ioaddr + MID_0M);
        iowrite16(adrp[2], ioaddr + MID_0H);
}

static int r6040_close(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        struct pci_dev *pdev = lp->pdev;

        phy_stop(dev->phydev);
        napi_disable(&lp->napi);
        netif_stop_queue(dev);

        spin_lock_irq(&lp->lock);
        r6040_down(dev);

        /* Free RX buffer */
        r6040_free_rxbufs(dev);

        /* Free TX buffer */
        r6040_free_txbufs(dev);

        spin_unlock_irq(&lp->lock);

        free_irq(dev->irq, dev);

        /* Free Descriptor memory */
        if (lp->rx_ring) {
                dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring,
                                  lp->rx_ring_dma);
                lp->rx_ring = NULL;
        }

        if (lp->tx_ring) {
                dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring,
                                  lp->tx_ring_dma);
                lp->tx_ring = NULL;
        }

        return 0;
}

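/* Receive up to @limit packets from descriptors that the MAC has handed back
 * to the CPU; called from the NAPI poll handler.
 */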
static int r6040_rx(struct net_device *dev, int limit)
{
        struct r6040_private *priv = netdev_priv(dev);
        struct r6040_descriptor *descptr = priv->rx_remove_ptr;
        struct sk_buff *skb_ptr, *new_skb;
        int count = 0;
        u16 err;

        /* Limit not reached and the descriptor belongs to the CPU */
        while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
                /* Read the descriptor status */
                err = descptr->status;
                /* Global error status set */
                if (err & DSC_RX_ERR) {
                        /* RX dribble */
                        if (err & DSC_RX_ERR_DRI)
                                dev->stats.rx_frame_errors++;
                        /* Buffer length exceeded */
                        if (err & DSC_RX_ERR_BUF)
                                dev->stats.rx_length_errors++;
                        /* Packet too long */
                        if (err & DSC_RX_ERR_LONG)
                                dev->stats.rx_length_errors++;
                        /* Packet < 64 bytes */
                        if (err & DSC_RX_ERR_RUNT)
                                dev->stats.rx_length_errors++;
                        /* CRC error */
                        if (err & DSC_RX_ERR_CRC) {
                                spin_lock(&priv->lock);
                                dev->stats.rx_crc_errors++;
                                spin_unlock(&priv->lock);
                        }
                        goto next_descr;
                }

                /* Packet successfully received */
                new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto next_descr;
                }
                skb_ptr = descptr->skb_ptr;
                skb_ptr->dev = priv->dev;

                /* Do not count the CRC */
                skb_put(skb_ptr, descptr->len - ETH_FCS_LEN);
                dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
                                 MAX_BUF_SIZE, DMA_FROM_DEVICE);
                skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

                /* Send to upper layer */
                netif_receive_skb(skb_ptr);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += descptr->len - ETH_FCS_LEN;

                /* put new skb into descriptor */
                descptr->skb_ptr = new_skb;
                descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev,
                                                          descptr->skb_ptr->data,
                                                          MAX_BUF_SIZE,
                                                          DMA_FROM_DEVICE));

next_descr:
                /* put the descriptor back to the MAC */
                descptr->status = DSC_OWNER_MAC;
                descptr = descptr->vndescp;
                count++;
        }
        priv->rx_remove_ptr = descptr;

        return count;
}

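/* Reclaim TX descriptors the MAC has finished with, update statistics and
 * wake the transmit queue when descriptors are available again.
 */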
static void r6040_tx(struct net_device *dev)
{
        struct r6040_private *priv = netdev_priv(dev);
        struct r6040_descriptor *descptr;
        void __iomem *ioaddr = priv->base;
        struct sk_buff *skb_ptr;
        u16 err;

        spin_lock(&priv->lock);
        descptr = priv->tx_remove_ptr;
        while (priv->tx_free_desc < TX_DCNT) {
                /* Check for errors */
                err = ioread16(ioaddr + MLSR);

                if (err & TX_FIFO_UNDR)
                        dev->stats.tx_fifo_errors++;
                if (err & (TX_EXCEEDC | TX_LATEC))
                        dev->stats.tx_carrier_errors++;

                if (descptr->status & DSC_OWNER_MAC)
                        break; /* Not complete */
                skb_ptr = descptr->skb_ptr;

                /* Statistic Counter */
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb_ptr->len;

                dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
                                 skb_ptr->len, DMA_TO_DEVICE);
                /* Free buffer */
                dev_kfree_skb(skb_ptr);
                descptr->skb_ptr = NULL;
                /* To next descriptor */
                descptr = descptr->vndescp;
                priv->tx_free_desc++;
        }
        priv->tx_remove_ptr = descptr;

        if (priv->tx_free_desc)
                netif_wake_queue(dev);
        spin_unlock(&priv->lock);
}

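/* NAPI poll handler: reclaim completed TX descriptors, receive up to @budget
 * packets and re-enable RX/TX interrupts once the ring has been drained.
 */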
static int r6040_poll(struct napi_struct *napi, int budget)
{
        struct r6040_private *priv =
                container_of(napi, struct r6040_private, napi);
        struct net_device *dev = priv->dev;
        void __iomem *ioaddr = priv->base;
        int work_done;

        r6040_tx(dev);

        work_done = r6040_rx(dev, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* Enable RX/TX interrupt */
                iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
                          ioaddr + MIER);
        }
        return work_done;
}

/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        u16 misr, status;

        /* Save MIER */
        misr = ioread16(ioaddr + MIER);
        /* Mask off RDC MAC interrupt */
        iowrite16(MSK_INT, ioaddr + MIER);
        /* Read MISR status and clear */
        status = ioread16(ioaddr + MISR);

        if (status == 0x0000 || status == 0xffff) {
                /* Restore RDC MAC interrupt */
                iowrite16(misr, ioaddr + MIER);
                return IRQ_NONE;
        }

        /* RX interrupt request */
        if (status & (RX_INTS | TX_INTS)) {
                if (status & RX_NO_DESC) {
                        /* RX descriptor unavailable */
                        dev->stats.rx_dropped++;
                        dev->stats.rx_missed_errors++;
                }
                if (status & RX_FIFO_FULL)
                        dev->stats.rx_fifo_errors++;

                if (likely(napi_schedule_prep(&lp->napi))) {
                        /* Mask off RX interrupt */
                        misr &= ~(RX_INTS | TX_INTS);
                        __napi_schedule_irqoff(&lp->napi);
                }
        }

        /* Restore RDC MAC interrupt */
        iowrite16(misr, ioaddr + MIER);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void r6040_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        r6040_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

/* Init RDC MAC */
static int r6040_up(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        int ret;

        /* Initialise and alloc RX/TX buffers */
        r6040_init_txbufs(dev);
        ret = r6040_alloc_rxbufs(dev);
        if (ret)
                return ret;

        /* improve performance (by RDC guys) */
        r6040_phy_write(ioaddr, 30, 17,
                        (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
        r6040_phy_write(ioaddr, 30, 17,
                        ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
        r6040_phy_write(ioaddr, 0, 19, 0x0000);
        r6040_phy_write(ioaddr, 0, 30, 0x01F0);

        /* Initialize all MAC registers */
        r6040_init_mac_regs(dev);

        phy_start(dev->phydev);

        return 0;
}

/* Read/set MAC address routines */
static void r6040_mac_address(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        u16 *adrp;

        /* Reset MAC */
        r6040_reset_mac(lp);

        /* Restore MAC Address */
        adrp = (u16 *) dev->dev_addr;
        iowrite16(adrp[0], ioaddr + MID_0L);
        iowrite16(adrp[1], ioaddr + MID_0M);
        iowrite16(adrp[2], ioaddr + MID_0H);
}

static int r6040_open(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        int ret;

        /* Request IRQ and Register interrupt handler */
        ret = request_irq(dev->irq, r6040_interrupt,
                IRQF_SHARED, dev->name, dev);
        if (ret)
                goto out;

        /* Set MAC address */
        r6040_mac_address(dev);

        /* Allocate Descriptor memory */
        lp->rx_ring =
                dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE,
                                   &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring) {
                ret = -ENOMEM;
                goto err_free_irq;
        }

        lp->tx_ring =
                dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE,
                                   &lp->tx_ring_dma, GFP_KERNEL);
        if (!lp->tx_ring) {
                ret = -ENOMEM;
                goto err_free_rx_ring;
        }

        ret = r6040_up(dev);
        if (ret)
                goto err_free_tx_ring;

        napi_enable(&lp->napi);
        netif_start_queue(dev);

        return 0;

err_free_tx_ring:
        dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring,
                          lp->tx_ring_dma);
err_free_rx_ring:
        dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring,
                          lp->rx_ring_dma);
err_free_irq:
        free_irq(dev->irq, dev);
out:
        return ret;
}

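/* Queue one skb for transmission: fill the next free TX descriptor, hand it
 * to the MAC and kick the transmitter via MTPR unless more frames are still
 * pending (xmit_more).
 */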
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        struct r6040_descriptor *descptr;
        void __iomem *ioaddr = lp->base;
        unsigned long flags;

        if (skb_put_padto(skb, ETH_ZLEN) < 0)
                return NETDEV_TX_OK;

        /* Critical Section */
        spin_lock_irqsave(&lp->lock, flags);

        /* TX resource check */
        if (!lp->tx_free_desc) {
                spin_unlock_irqrestore(&lp->lock, flags);
                netif_stop_queue(dev);
                netdev_err(dev, ": no tx descriptor\n");
                return NETDEV_TX_BUSY;
        }

        /* Set TX descriptor & Transmit it */
        lp->tx_free_desc--;
        descptr = lp->tx_insert_ptr;
        descptr->len = skb->len;
        descptr->skb_ptr = skb;
        descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data,
                                                  skb->len, DMA_TO_DEVICE));
        descptr->status = DSC_OWNER_MAC;

        skb_tx_timestamp(skb);

        /* Trigger the MAC to check the TX descriptor */
        if (!netdev_xmit_more() || netif_queue_stopped(dev))
                iowrite16(TM2TX, ioaddr + MTPR);
        lp->tx_insert_ptr = descptr->vndescp;

        /* If no tx resource, stop */
        if (!lp->tx_free_desc)
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&lp->lock, flags);

        return NETDEV_TX_OK;
}

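/* Program the RX filters: promiscuous mode, the MID_1..MID_3 exact-match
 * registers for up to MCAST_MAX addresses, or the 64-bit multicast hash
 * table, depending on the interface flags and the multicast address count.
 */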
static void r6040_multicast_list(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        void __iomem *ioaddr = lp->base;
        unsigned long flags;
        struct netdev_hw_addr *ha;
        int i;
        u16 *adrp;
        u16 hash_table[4] = { 0 };

        spin_lock_irqsave(&lp->lock, flags);

        /* Keep our MAC Address */
        adrp = (u16 *)dev->dev_addr;
        iowrite16(adrp[0], ioaddr + MID_0L);
        iowrite16(adrp[1], ioaddr + MID_0M);
        iowrite16(adrp[2], ioaddr + MID_0H);

        /* Clear AMCP & PROM bits */
        lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);

        /* Promiscuous mode */
        if (dev->flags & IFF_PROMISC)
                lp->mcr0 |= MCR0_PROMISC;

        /* Enable multicast hash table function to
         * receive all multicast packets. */
        else if (dev->flags & IFF_ALLMULTI) {
                lp->mcr0 |= MCR0_HASH_EN;

                for (i = 0; i < MCAST_MAX ; i++) {
                        iowrite16(0, ioaddr + MID_1L + 8 * i);
                        iowrite16(0, ioaddr + MID_1M + 8 * i);
                        iowrite16(0, ioaddr + MID_1H + 8 * i);
                }

                for (i = 0; i < 4; i++)
                        hash_table[i] = 0xffff;
        }
        /* Use internal multicast address registers if the number of
         * multicast addresses is not greater than MCAST_MAX. */
        else if (netdev_mc_count(dev) <= MCAST_MAX) {
                i = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        u16 *adrp = (u16 *) ha->addr;
                        iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
                        iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
                        iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
                        i++;
                }
                while (i < MCAST_MAX) {
                        iowrite16(0, ioaddr + MID_1L + 8 * i);
                        iowrite16(0, ioaddr + MID_1M + 8 * i);
                        iowrite16(0, ioaddr + MID_1H + 8 * i);
                        i++;
                }
        }
        /* Otherwise, Enable multicast hash table function. */
        else {
                u32 crc;

                lp->mcr0 |= MCR0_HASH_EN;

                for (i = 0; i < MCAST_MAX ; i++) {
                        iowrite16(0, ioaddr + MID_1L + 8 * i);
                        iowrite16(0, ioaddr + MID_1M + 8 * i);
                        iowrite16(0, ioaddr + MID_1H + 8 * i);
                }

                /* Build multicast hash table */
                netdev_for_each_mc_addr(ha, dev) {
                        u8 *addrs = ha->addr;

                        crc = ether_crc(ETH_ALEN, addrs);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
        }

        iowrite16(lp->mcr0, ioaddr + MCR0);

        /* Fill the MAC hash tables with their values */
        if (lp->mcr0 & MCR0_HASH_EN) {
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
                iowrite16(hash_table[2], ioaddr + MAR2);
                iowrite16(hash_table[3], ioaddr + MAR3);
        }

        spin_unlock_irqrestore(&lp->lock, flags);
}

static void netdev_get_drvinfo(struct net_device *dev,
                        struct ethtool_drvinfo *info)
{
        struct r6040_private *rp = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .nway_reset             = phy_ethtool_nway_reset,
};

static const struct net_device_ops r6040_netdev_ops = {
        .ndo_open               = r6040_open,
        .ndo_stop               = r6040_close,
        .ndo_start_xmit         = r6040_start_xmit,
        .ndo_get_stats          = r6040_get_stats,
        .ndo_set_rx_mode        = r6040_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_eth_ioctl          = phy_do_ioctl,
        .ndo_tx_timeout         = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = r6040_poll_controller,
#endif
};

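/* phylib link-change callback: propagate duplex changes to MCR0 and log
 * link state transitions.
 */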
static void r6040_adjust_link(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        int status_changed = 0;
        void __iomem *ioaddr = lp->base;

        BUG_ON(!phydev);

        if (lp->old_link != phydev->link) {
                status_changed = 1;
                lp->old_link = phydev->link;
        }

        /* reflect duplex change */
        if (phydev->link && (lp->old_duplex != phydev->duplex)) {
                lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
                iowrite16(lp->mcr0, ioaddr);

                status_changed = 1;
                lp->old_duplex = phydev->duplex;
        }

        if (status_changed)
                phy_print_status(phydev);
}

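/* Find the first PHY on the MDIO bus and connect it to the net device,
 * limited to 100 Mbit/s.
 */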
static int r6040_mii_probe(struct net_device *dev)
{
        struct r6040_private *lp = netdev_priv(dev);
        struct phy_device *phydev = NULL;

        phydev = phy_find_first(lp->mii_bus);
        if (!phydev) {
                dev_err(&lp->pdev->dev, "no PHY found\n");
                return -ENODEV;
        }

        phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
                             PHY_INTERFACE_MODE_MII);

        if (IS_ERR(phydev)) {
                dev_err(&lp->pdev->dev, "could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        phy_set_max_speed(phydev, SPEED_100);

        lp->old_link = 0;
        lp->old_duplex = -1;

        phy_attached_info(phydev);

        return 0;
}

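/* PCI probe: map the register BAR, read the MAC address from the MID0
 * registers, register the MDIO bus, attach the PHY and register the net
 * device.
 */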
static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct r6040_private *lp;
        void __iomem *ioaddr;
        int err, io_size = R6040_IO_SIZE;
        static int card_idx = -1;
        int bar = 0;
        u16 *adrp;

        pr_info("%s\n", version);

        err = pci_enable_device(pdev);
        if (err)
                goto err_out;

        /* this should always be supported */
        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
        }
        err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
        }

        /* IO Size check */
        if (pci_resource_len(pdev, bar) < io_size) {
                dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
                err = -EIO;
                goto err_out_disable_dev;
        }

        pci_set_master(pdev);

        dev = alloc_etherdev(sizeof(struct r6040_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_out_disable_dev;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
        lp = netdev_priv(dev);

        err = pci_request_regions(pdev, DRV_NAME);

        if (err) {
                dev_err(&pdev->dev, "Failed to request PCI regions\n");
                goto err_out_free_dev;
        }

        ioaddr = pci_iomap(pdev, bar, io_size);
        if (!ioaddr) {
                dev_err(&pdev->dev, "ioremap failed for device\n");
                err = -EIO;
                goto err_out_free_res;
        }

        /* If PHY status change register is still set to zero it means the
         * bootloader didn't initialize it, so we set it to:
         * - enable phy status change
         * - enable all phy addresses
         * - set to lowest timer divider */
        if (ioread16(ioaddr + PHY_CC) == 0)
                iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
                                7 << TMRDIV_SHIFT, ioaddr + PHY_CC);

        /* Init system & device */
        lp->base = ioaddr;
        dev->irq = pdev->irq;

        spin_lock_init(&lp->lock);
        pci_set_drvdata(pdev, dev);

        /* Set MAC address */
        card_idx++;

        adrp = (u16 *)dev->dev_addr;
        adrp[0] = ioread16(ioaddr + MID_0L);
        adrp[1] = ioread16(ioaddr + MID_0M);
        adrp[2] = ioread16(ioaddr + MID_0H);

        /* Some bootloaders/BIOSes do not initialize the
         * MAC address, warn about that */
        if (!(adrp[0] || adrp[1] || adrp[2])) {
                netdev_warn(dev, "MAC address not initialized, "
                                        "generating a random one\n");
                eth_hw_addr_random(dev);
        }

        /* Link new device into r6040_root_dev */
        lp->pdev = pdev;
        lp->dev = dev;

        /* Init RDC private data */
        lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;

        /* The RDC-specific entries in the device structure. */
        dev->netdev_ops = &r6040_netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        netif_napi_add(dev, &lp->napi, r6040_poll, 64);

        lp->mii_bus = mdiobus_alloc();
        if (!lp->mii_bus) {
                dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
                err = -ENOMEM;
                goto err_out_unmap;
        }

        lp->mii_bus->priv = dev;
        lp->mii_bus->read = r6040_mdiobus_read;
        lp->mii_bus->write = r6040_mdiobus_write;
        lp->mii_bus->name = "r6040_eth_mii";
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                dev_name(&pdev->dev), card_idx);

        err = mdiobus_register(lp->mii_bus);
        if (err) {
                dev_err(&pdev->dev, "failed to register MII bus\n");
                goto err_out_mdio;
        }

        err = r6040_mii_probe(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to probe MII bus\n");
                goto err_out_mdio_unregister;
        }

        /* Register net device. After this point dev->name is assigned. */
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register net device\n");
                goto err_out_mdio_unregister;
        }
        return 0;

err_out_mdio_unregister:
        mdiobus_unregister(lp->mii_bus);
err_out_mdio:
        mdiobus_free(lp->mii_bus);
err_out_unmap:
        netif_napi_del(&lp->napi);
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_dev:
        free_netdev(dev);
err_out_disable_dev:
        pci_disable_device(pdev);
err_out:
        return err;
}

static void r6040_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct r6040_private *lp = netdev_priv(dev);

        unregister_netdev(dev);
        mdiobus_unregister(lp->mii_bus);
        mdiobus_free(lp->mii_bus);
        netif_napi_del(&lp->napi);
        pci_iounmap(pdev, lp->base);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
}

static const struct pci_device_id r6040_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);

static struct pci_driver r6040_driver = {
        .name           = DRV_NAME,
        .id_table       = r6040_pci_tbl,
        .probe          = r6040_init_one,
        .remove         = r6040_remove_one,
};

module_pci_driver(r6040_driver);