linux/drivers/net/ethernet/amd/au1000_eth.c
   1/*
   2 *
   3 * Alchemy Au1x00 ethernet driver
   4 *
   5 * Copyright 2001-2003, 2006 MontaVista Software Inc.
   6 * Copyright 2002 TimeSys Corp.
   7 * Added ethtool/mii-tool support,
   8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
   9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
  10 * or riemer@riemer-nt.de: fixed the link beat detection with
  11 * ioctls (SIOCGMIIPHY)
  12 * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
  13 *  converted to use linux-2.6.x's PHY framework
  14 *
  15 * Author: MontaVista Software, Inc.
  16 *              ppopov@mvista.com or source@mvista.com
  17 *
  18 * ########################################################################
  19 *
  20 *  This program is free software; you can distribute it and/or modify it
  21 *  under the terms of the GNU General Public License (Version 2) as
  22 *  published by the Free Software Foundation.
  23 *
  24 *  This program is distributed in the hope it will be useful, but WITHOUT
  25 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  26 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  27 *  for more details.
  28 *
  29 *  You should have received a copy of the GNU General Public License along
  30 *  with this program; if not, see <http://www.gnu.org/licenses/>.
  31 *
  32 * ########################################################################
  33 *
  34 *
  35 */
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/capability.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/module.h>
  41#include <linux/kernel.h>
  42#include <linux/string.h>
  43#include <linux/timer.h>
  44#include <linux/errno.h>
  45#include <linux/in.h>
  46#include <linux/ioport.h>
  47#include <linux/bitops.h>
  48#include <linux/slab.h>
  49#include <linux/interrupt.h>
  50#include <linux/netdevice.h>
  51#include <linux/etherdevice.h>
  52#include <linux/ethtool.h>
  53#include <linux/mii.h>
  54#include <linux/skbuff.h>
  55#include <linux/delay.h>
  56#include <linux/crc32.h>
  57#include <linux/phy.h>
  58#include <linux/platform_device.h>
  59#include <linux/cpu.h>
  60#include <linux/io.h>
  61
  62#include <asm/mipsregs.h>
  63#include <asm/irq.h>
  64#include <asm/processor.h>
  65
  66#include <au1000.h>
  67#include <au1xxx_eth.h>
  68#include <prom.h>
  69
  70#include "au1000_eth.h"
  71
  72#ifdef AU1000_ETH_DEBUG
  73static int au1000_debug = 5;
  74#else
  75static int au1000_debug = 3;
  76#endif
  77
  78#define AU1000_DEF_MSG_ENABLE   (NETIF_MSG_DRV  | \
  79                                NETIF_MSG_PROBE | \
  80                                NETIF_MSG_LINK)
  81
  82#define DRV_NAME        "au1000_eth"
  83#define DRV_VERSION     "1.7"
  84#define DRV_AUTHOR      "Pete Popov <ppopov@embeddedalley.com>"
  85#define DRV_DESC        "Au1xxx on-chip Ethernet driver"
  86
  87MODULE_AUTHOR(DRV_AUTHOR);
  88MODULE_DESCRIPTION(DRV_DESC);
  89MODULE_LICENSE("GPL");
  90MODULE_VERSION(DRV_VERSION);
  91
  92/* AU1000 MAC registers and bits */
  93#define MAC_CONTROL             0x0
  94#  define MAC_RX_ENABLE         (1 << 2)
  95#  define MAC_TX_ENABLE         (1 << 3)
  96#  define MAC_DEF_CHECK         (1 << 5)
  97#  define MAC_SET_BL(X)         (((X) & 0x3) << 6)
  98#  define MAC_AUTO_PAD          (1 << 8)
  99#  define MAC_DISABLE_RETRY     (1 << 10)
 100#  define MAC_DISABLE_BCAST     (1 << 11)
 101#  define MAC_LATE_COL          (1 << 12)
 102#  define MAC_HASH_MODE         (1 << 13)
 103#  define MAC_HASH_ONLY         (1 << 15)
 104#  define MAC_PASS_ALL          (1 << 16)
 105#  define MAC_INVERSE_FILTER    (1 << 17)
 106#  define MAC_PROMISCUOUS       (1 << 18)
 107#  define MAC_PASS_ALL_MULTI    (1 << 19)
 108#  define MAC_FULL_DUPLEX       (1 << 20)
 109#  define MAC_NORMAL_MODE       0
 110#  define MAC_INT_LOOPBACK      (1 << 21)
 111#  define MAC_EXT_LOOPBACK      (1 << 22)
 112#  define MAC_DISABLE_RX_OWN    (1 << 23)
 113#  define MAC_BIG_ENDIAN        (1 << 30)
 114#  define MAC_RX_ALL            (1 << 31)
 115#define MAC_ADDRESS_HIGH        0x4
 116#define MAC_ADDRESS_LOW         0x8
 117#define MAC_MCAST_HIGH          0xC
 118#define MAC_MCAST_LOW           0x10
 119#define MAC_MII_CNTRL           0x14
 120#  define MAC_MII_BUSY          (1 << 0)
 121#  define MAC_MII_READ          0
 122#  define MAC_MII_WRITE         (1 << 1)
 123#  define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
 124#  define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
 125#define MAC_MII_DATA            0x18
 126#define MAC_FLOW_CNTRL          0x1C
 127#  define MAC_FLOW_CNTRL_BUSY   (1 << 0)
 128#  define MAC_FLOW_CNTRL_ENABLE (1 << 1)
 129#  define MAC_PASS_CONTROL      (1 << 2)
 130#  define MAC_SET_PAUSE(X)      (((X) & 0xffff) << 16)
 131#define MAC_VLAN1_TAG           0x20
 132#define MAC_VLAN2_TAG           0x24
 133
 134/* Ethernet Controller Enable */
 135#  define MAC_EN_CLOCK_ENABLE   (1 << 0)
 136#  define MAC_EN_RESET0         (1 << 1)
 137#  define MAC_EN_TOSS           (0 << 2)
 138#  define MAC_EN_CACHEABLE      (1 << 3)
 139#  define MAC_EN_RESET1         (1 << 4)
 140#  define MAC_EN_RESET2         (1 << 5)
 141#  define MAC_DMA_RESET         (1 << 6)
 142
 143/* Ethernet Controller DMA Channels */
 144/* offsets from MAC_TX_RING_ADDR address */
 145#define MAC_TX_BUFF0_STATUS     0x0
 146#  define TX_FRAME_ABORTED      (1 << 0)
 147#  define TX_JAB_TIMEOUT        (1 << 1)
 148#  define TX_NO_CARRIER         (1 << 2)
 149#  define TX_LOSS_CARRIER       (1 << 3)
 150#  define TX_EXC_DEF            (1 << 4)
 151#  define TX_LATE_COLL_ABORT    (1 << 5)
 152#  define TX_EXC_COLL           (1 << 6)
 153#  define TX_UNDERRUN           (1 << 7)
 154#  define TX_DEFERRED           (1 << 8)
 155#  define TX_LATE_COLL          (1 << 9)
 156#  define TX_COLL_CNT_MASK      (0xF << 10)
 157#  define TX_PKT_RETRY          (1 << 31)
 158#define MAC_TX_BUFF0_ADDR       0x4
 159#  define TX_DMA_ENABLE         (1 << 0)
 160#  define TX_T_DONE             (1 << 1)
 161#  define TX_GET_DMA_BUFFER(X)  (((X) >> 2) & 0x3)
 162#define MAC_TX_BUFF0_LEN        0x8
 163#define MAC_TX_BUFF1_STATUS     0x10
 164#define MAC_TX_BUFF1_ADDR       0x14
 165#define MAC_TX_BUFF1_LEN        0x18
 166#define MAC_TX_BUFF2_STATUS     0x20
 167#define MAC_TX_BUFF2_ADDR       0x24
 168#define MAC_TX_BUFF2_LEN        0x28
 169#define MAC_TX_BUFF3_STATUS     0x30
 170#define MAC_TX_BUFF3_ADDR       0x34
 171#define MAC_TX_BUFF3_LEN        0x38
 172
 173/* offsets from MAC_RX_RING_ADDR */
 174#define MAC_RX_BUFF0_STATUS     0x0
 175#  define RX_FRAME_LEN_MASK     0x3fff
 176#  define RX_WDOG_TIMER         (1 << 14)
 177#  define RX_RUNT               (1 << 15)
 178#  define RX_OVERLEN            (1 << 16)
 179#  define RX_COLL               (1 << 17)
 180#  define RX_ETHER              (1 << 18)
 181#  define RX_MII_ERROR          (1 << 19)
 182#  define RX_DRIBBLING          (1 << 20)
 183#  define RX_CRC_ERROR          (1 << 21)
 184#  define RX_VLAN1              (1 << 22)
 185#  define RX_VLAN2              (1 << 23)
 186#  define RX_LEN_ERROR          (1 << 24)
 187#  define RX_CNTRL_FRAME        (1 << 25)
 188#  define RX_U_CNTRL_FRAME      (1 << 26)
 189#  define RX_MCAST_FRAME        (1 << 27)
 190#  define RX_BCAST_FRAME        (1 << 28)
 191#  define RX_FILTER_FAIL        (1 << 29)
 192#  define RX_PACKET_FILTER      (1 << 30)
 193#  define RX_MISSED_FRAME       (1 << 31)
 194
 195#  define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN |  \
 196                    RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
 197                    RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
 198#define MAC_RX_BUFF0_ADDR       0x4
 199#  define RX_DMA_ENABLE         (1 << 0)
 200#  define RX_T_DONE             (1 << 1)
 201#  define RX_GET_DMA_BUFFER(X)  (((X) >> 2) & 0x3)
 202#  define RX_SET_BUFF_ADDR(X)   ((X) & 0xffffffc0)
 203#define MAC_RX_BUFF1_STATUS     0x10
 204#define MAC_RX_BUFF1_ADDR       0x14
 205#define MAC_RX_BUFF2_STATUS     0x20
 206#define MAC_RX_BUFF2_ADDR       0x24
 207#define MAC_RX_BUFF3_STATUS     0x30
 208#define MAC_RX_BUFF3_ADDR       0x34
 209
 210/*
 211 * Theory of operation
 212 *
 213 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
 214 * There are four receive and four transmit descriptors.  These
 215 * descriptors are not in memory; rather, they are just a set of
 216 * hardware registers.
 217 *
 218 * Since the Au1000 has a coherent data cache, the receive and
 219 * transmit buffers are allocated from the KSEG0 segment. The
 220 * hardware registers, however, are still mapped at KSEG1 to
  221 * make sure there are no out-of-order writes, and that all writes
 222 * complete immediately.
 223 */
 224
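     /*
      * Because there are only four descriptors per direction, the ring
      * indices used below are advanced with "& (NUM_TX_DMA - 1)" and
      * "& (NUM_RX_DMA - 1)", which relies on both ring sizes being
      * powers of two.
      */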
 225/*
 226 * board-specific configurations
 227 *
 228 * PHY detection algorithm
 229 *
 230 * If phy_static_config is undefined, the PHY setup is
 231 * autodetected:
 232 *
 233 * mii_probe() first searches the current MAC's MII bus for a PHY,
 234 * selecting the first (or last, if phy_search_highest_addr is
 235 * defined) PHY address not already claimed by another netdev.
 236 *
 237 * If nothing was found that way when searching for the 2nd ethernet
 238 * controller's PHY and phy1_search_mac0 is defined, then
 239 * the first MII bus is searched as well for an unclaimed PHY; this is
  240 * needed when a dual PHY is accessible only through MAC0's MII
  241 * bus.
 242 *
 243 * Finally, if no PHY is found, then the corresponding ethernet
 244 * controller is not registered to the network subsystem.
 245 */
 246
 247/* autodetection defaults: phy1_search_mac0 */
 248
 249/* static PHY setup
 250 *
  251 * most boards' PHY setup should be properly detectable with the
 252 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
 253 * you have a switch attached, or want to use the PHY's interrupt
 254 * notification capabilities) you can provide a static PHY
 255 * configuration here
 256 *
  257 * An IRQ may only be set if a PHY address has been configured,
  258 * and if a PHY address is given, a bus id must also be set.
 259 *
  260 * PS: make sure the IRQs used here are configured properly in the
  261 * board-specific irq map.
 262 */
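     /*
      * Illustrative sketch only: the field names below come from
      * struct au1000_eth_platform_data as consumed by au1000_probe(),
      * but the values are made-up placeholders, not a real board
      * configuration.
      *
      *     static struct au1000_eth_platform_data board_eth0_pdata = {
      *             .phy_static_config       = 1,
      *             .phy_search_highest_addr = 0,
      *             .phy1_search_mac0        = 0,
      *             .phy_addr                = 1,  (the PHY's MII address)
      *             .phy_busid               = 0,  (0 = MAC0's MII bus)
      *             .phy_irq                 = 0,  (0 = no PHY interrupt)
      *     };
      */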
 263
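     /*
      * Enable the MAC clock and bring the MAC out of reset.  Unless
      * force_reset is set, this is a no-op when the MAC is already
      * enabled.  Takes aup->lock itself.
      */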
 264static void au1000_enable_mac(struct net_device *dev, int force_reset)
 265{
 266        unsigned long flags;
 267        struct au1000_private *aup = netdev_priv(dev);
 268
 269        spin_lock_irqsave(&aup->lock, flags);
 270
 271        if (force_reset || (!aup->mac_enabled)) {
 272                writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 273                wmb(); /* drain writebuffer */
 274                mdelay(2);
 275                writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
 276                                | MAC_EN_CLOCK_ENABLE), aup->enable);
 277                wmb(); /* drain writebuffer */
 278                mdelay(2);
 279
 280                aup->mac_enabled = 1;
 281        }
 282
 283        spin_unlock_irqrestore(&aup->lock, flags);
 284}
 285
 286/*
 287 * MII operations
 288 */
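     /*
      * A raw MDIO transaction: wait for MAC_MII_BUSY to clear, program
      * the PHY and register address (plus the read/write opcode) into
      * mii_control, and, for a read, wait for BUSY to clear again
      * before fetching the result from mii_data.
      */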
 289static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
 290{
 291        struct au1000_private *aup = netdev_priv(dev);
 292        u32 *const mii_control_reg = &aup->mac->mii_control;
 293        u32 *const mii_data_reg = &aup->mac->mii_data;
 294        u32 timedout = 20;
 295        u32 mii_control;
 296
 297        while (readl(mii_control_reg) & MAC_MII_BUSY) {
 298                mdelay(1);
 299                if (--timedout == 0) {
 300                        netdev_err(dev, "read_MII busy timeout!!\n");
 301                        return -1;
 302                }
 303        }
 304
 305        mii_control = MAC_SET_MII_SELECT_REG(reg) |
 306                MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
 307
 308        writel(mii_control, mii_control_reg);
 309
 310        timedout = 20;
 311        while (readl(mii_control_reg) & MAC_MII_BUSY) {
 312                mdelay(1);
 313                if (--timedout == 0) {
 314                        netdev_err(dev, "mdio_read busy timeout!!\n");
 315                        return -1;
 316                }
 317        }
 318        return readl(mii_data_reg);
 319}
 320
 321static void au1000_mdio_write(struct net_device *dev, int phy_addr,
 322                              int reg, u16 value)
 323{
 324        struct au1000_private *aup = netdev_priv(dev);
 325        u32 *const mii_control_reg = &aup->mac->mii_control;
 326        u32 *const mii_data_reg = &aup->mac->mii_data;
 327        u32 timedout = 20;
 328        u32 mii_control;
 329
 330        while (readl(mii_control_reg) & MAC_MII_BUSY) {
 331                mdelay(1);
 332                if (--timedout == 0) {
 333                        netdev_err(dev, "mdio_write busy timeout!!\n");
 334                        return;
 335                }
 336        }
 337
 338        mii_control = MAC_SET_MII_SELECT_REG(reg) |
 339                MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
 340
 341        writel(value, mii_data_reg);
 342        writel(mii_control, mii_control_reg);
 343}
 344
 345static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
 346{
 347        struct net_device *const dev = bus->priv;
 348
 349        /* make sure the MAC associated with this
 350         * mii_bus is enabled
 351         */
 352        au1000_enable_mac(dev, 0);
 353
 354        return au1000_mdio_read(dev, phy_addr, regnum);
 355}
 356
 357static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
 358                                u16 value)
 359{
 360        struct net_device *const dev = bus->priv;
 361
 362        /* make sure the MAC associated with this
 363         * mii_bus is enabled
 364         */
 365        au1000_enable_mac(dev, 0);
 366
 367        au1000_mdio_write(dev, phy_addr, regnum, value);
 368        return 0;
 369}
 370
 371static int au1000_mdiobus_reset(struct mii_bus *bus)
 372{
 373        struct net_device *const dev = bus->priv;
 374
 375        /* make sure the MAC associated with this
 376         * mii_bus is enabled
 377         */
 378        au1000_enable_mac(dev, 0);
 379
 380        return 0;
 381}
 382
 383static void au1000_hard_stop(struct net_device *dev)
 384{
 385        struct au1000_private *aup = netdev_priv(dev);
 386        u32 reg;
 387
 388        netif_dbg(aup, drv, dev, "hard stop\n");
 389
 390        reg = readl(&aup->mac->control);
 391        reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
 392        writel(reg, &aup->mac->control);
 393        wmb(); /* drain writebuffer */
 394        mdelay(10);
 395}
 396
 397static void au1000_enable_rx_tx(struct net_device *dev)
 398{
 399        struct au1000_private *aup = netdev_priv(dev);
 400        u32 reg;
 401
 402        netif_dbg(aup, hw, dev, "enable_rx_tx\n");
 403
 404        reg = readl(&aup->mac->control);
 405        reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
 406        writel(reg, &aup->mac->control);
 407        wmb(); /* drain writebuffer */
 408        mdelay(10);
 409}
 410
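     /*
      * phylib link-change callback.  A duplex change requires stopping
      * the MAC and reprogramming MAC_FULL_DUPLEX/MAC_DISABLE_RX_OWN
      * before re-enabling RX/TX; speed and link changes are only
      * logged, nothing in the MAC is reprogrammed for them.
      */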
 411static void
 412au1000_adjust_link(struct net_device *dev)
 413{
 414        struct au1000_private *aup = netdev_priv(dev);
 415        struct phy_device *phydev = dev->phydev;
 416        unsigned long flags;
 417        u32 reg;
 418
 419        int status_change = 0;
 420
 421        BUG_ON(!phydev);
 422
 423        spin_lock_irqsave(&aup->lock, flags);
 424
 425        if (phydev->link && (aup->old_speed != phydev->speed)) {
 426                /* speed changed */
 427
 428                switch (phydev->speed) {
 429                case SPEED_10:
 430                case SPEED_100:
 431                        break;
 432                default:
 433                        netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
 434                                                        phydev->speed);
 435                        break;
 436                }
 437
 438                aup->old_speed = phydev->speed;
 439
 440                status_change = 1;
 441        }
 442
 443        if (phydev->link && (aup->old_duplex != phydev->duplex)) {
 444                /* duplex mode changed */
 445
 446                /* switching duplex mode requires to disable rx and tx! */
 447                au1000_hard_stop(dev);
 448
 449                reg = readl(&aup->mac->control);
 450                if (DUPLEX_FULL == phydev->duplex) {
 451                        reg |= MAC_FULL_DUPLEX;
 452                        reg &= ~MAC_DISABLE_RX_OWN;
 453                } else {
 454                        reg &= ~MAC_FULL_DUPLEX;
 455                        reg |= MAC_DISABLE_RX_OWN;
 456                }
 457                writel(reg, &aup->mac->control);
 458                wmb(); /* drain writebuffer */
 459                mdelay(1);
 460
 461                au1000_enable_rx_tx(dev);
 462                aup->old_duplex = phydev->duplex;
 463
 464                status_change = 1;
 465        }
 466
 467        if (phydev->link != aup->old_link) {
 468                /* link state changed */
 469
 470                if (!phydev->link) {
 471                        /* link went down */
 472                        aup->old_speed = 0;
 473                        aup->old_duplex = -1;
 474                }
 475
 476                aup->old_link = phydev->link;
 477                status_change = 1;
 478        }
 479
 480        spin_unlock_irqrestore(&aup->lock, flags);
 481
 482        if (status_change) {
 483                if (phydev->link)
 484                        netdev_info(dev, "link up (%d/%s)\n",
 485                               phydev->speed,
 486                               DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 487                else
 488                        netdev_info(dev, "link down\n");
 489        }
 490}
 491
 492static int au1000_mii_probe(struct net_device *dev)
 493{
 494        struct au1000_private *const aup = netdev_priv(dev);
 495        struct phy_device *phydev = NULL;
 496        int phy_addr;
 497
 498        if (aup->phy_static_config) {
 499                BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
 500
 501                if (aup->phy_addr)
 502                        phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
 503                else
 504                        netdev_info(dev, "using PHY-less setup\n");
 505                return 0;
 506        }
 507
 508        /* find the first (lowest address) PHY
 509         * on the current MAC's MII bus
 510         */
 511        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
 512                if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
 513                        phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
 514                        if (!aup->phy_search_highest_addr)
 515                                /* break out with first one found */
 516                                break;
 517                }
 518
 519        if (aup->phy1_search_mac0) {
 520                /* try harder to find a PHY */
 521                if (!phydev && (aup->mac_id == 1)) {
 522                        /* no PHY found, maybe we have a dual PHY? */
 523                        dev_info(&dev->dev, ": no PHY found on MAC1, "
 524                                "let's see if it's attached to MAC0...\n");
 525
 526                        /* find the first (lowest address) non-attached
 527                         * PHY on the MAC0 MII bus
 528                         */
 529                        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
 530                                struct phy_device *const tmp_phydev =
 531                                        mdiobus_get_phy(aup->mii_bus,
 532                                                        phy_addr);
 533
 534                                if (aup->mac_id == 1)
 535                                        break;
 536
 537                                /* no PHY here... */
 538                                if (!tmp_phydev)
 539                                        continue;
 540
 541                                /* already claimed by MAC0 */
 542                                if (tmp_phydev->attached_dev)
 543                                        continue;
 544
 545                                phydev = tmp_phydev;
 546                                break; /* found it */
 547                        }
 548                }
 549        }
 550
 551        if (!phydev) {
 552                netdev_err(dev, "no PHY found\n");
 553                return -1;
 554        }
 555
  556        /* now we are supposed to have a proper phydev to attach to... */
 557        BUG_ON(phydev->attached_dev);
 558
 559        phydev = phy_connect(dev, phydev_name(phydev),
 560                             &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
 561
 562        if (IS_ERR(phydev)) {
 563                netdev_err(dev, "Could not attach to PHY\n");
 564                return PTR_ERR(phydev);
 565        }
 566
 567        /* mask with MAC supported features */
 568        phydev->supported &= (SUPPORTED_10baseT_Half
 569                              | SUPPORTED_10baseT_Full
 570                              | SUPPORTED_100baseT_Half
 571                              | SUPPORTED_100baseT_Full
 572                              | SUPPORTED_Autoneg
 573                              /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
 574                              | SUPPORTED_MII
 575                              | SUPPORTED_TP);
 576
 577        phydev->advertising = phydev->supported;
 578
 579        aup->old_link = 0;
 580        aup->old_speed = 0;
 581        aup->old_duplex = -1;
 582
 583        phy_attached_info(phydev);
 584
 585        return 0;
 586}
 587
 588
 589/*
 590 * Buffer allocation/deallocation routines. The buffer descriptor returned
 591 * has the virtual and dma address of a buffer suitable for
  592 * both receive and transmit operations.
 593 */
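     /*
      * The free descriptors form a simple singly linked list chained
      * through db_dest.pnext, with aup->pDBfree as its head.
      */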
 594static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
 595{
 596        struct db_dest *pDB;
 597        pDB = aup->pDBfree;
 598
 599        if (pDB)
 600                aup->pDBfree = pDB->pnext;
 601
 602        return pDB;
 603}
 604
 605void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
 606{
 607        struct db_dest *pDBfree = aup->pDBfree;
 608        if (pDBfree)
 609                pDBfree->pnext = pDB;
 610        aup->pDBfree = pDB;
 611}
 612
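     /*
      * Stop the MAC, gate its clock and clear the DMA ring control
      * bits.  The "_unlocked" suffix means the caller must already
      * hold aup->lock (see au1000_reset_mac() and au1000_close()).
      */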
 613static void au1000_reset_mac_unlocked(struct net_device *dev)
 614{
 615        struct au1000_private *const aup = netdev_priv(dev);
 616        int i;
 617
 618        au1000_hard_stop(dev);
 619
 620        writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 621        wmb(); /* drain writebuffer */
 622        mdelay(2);
 623        writel(0, aup->enable);
 624        wmb(); /* drain writebuffer */
 625        mdelay(2);
 626
 627        aup->tx_full = 0;
 628        for (i = 0; i < NUM_RX_DMA; i++) {
 629                /* reset control bits */
 630                aup->rx_dma_ring[i]->buff_stat &= ~0xf;
 631        }
 632        for (i = 0; i < NUM_TX_DMA; i++) {
 633                /* reset control bits */
 634                aup->tx_dma_ring[i]->buff_stat &= ~0xf;
 635        }
 636
 637        aup->mac_enabled = 0;
 638
 639}
 640
 641static void au1000_reset_mac(struct net_device *dev)
 642{
 643        struct au1000_private *const aup = netdev_priv(dev);
 644        unsigned long flags;
 645
 646        netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
 647                                        (unsigned)aup);
 648
 649        spin_lock_irqsave(&aup->lock, flags);
 650
 651        au1000_reset_mac_unlocked(dev);
 652
 653        spin_unlock_irqrestore(&aup->lock, flags);
 654}
 655
 656/*
 657 * Setup the receive and transmit "rings".  These pointers are the addresses
 658 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
 659 * these are not descriptors sitting in memory.
 660 */
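     /*
      * The TX channel registers start at the MACDMA base and the RX
      * channel registers at offset 0x100 from it, which is where the
      * ring "pointers" set up below come from.
      */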
 661static void
 662au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
 663{
 664        int i;
 665
 666        for (i = 0; i < NUM_RX_DMA; i++) {
 667                aup->rx_dma_ring[i] = (struct rx_dma *)
 668                        (tx_base + 0x100 + sizeof(struct rx_dma) * i);
 669        }
 670        for (i = 0; i < NUM_TX_DMA; i++) {
 671                aup->tx_dma_ring[i] = (struct tx_dma *)
 672                        (tx_base + sizeof(struct tx_dma) * i);
 673        }
 674}
 675
 676/*
 677 * ethtool operations
 678 */
 679
 680static void
 681au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 682{
 683        struct au1000_private *aup = netdev_priv(dev);
 684
 685        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 686        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 687        snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
 688                 aup->mac_id);
 689}
 690
 691static void au1000_set_msglevel(struct net_device *dev, u32 value)
 692{
 693        struct au1000_private *aup = netdev_priv(dev);
 694        aup->msg_enable = value;
 695}
 696
 697static u32 au1000_get_msglevel(struct net_device *dev)
 698{
 699        struct au1000_private *aup = netdev_priv(dev);
 700        return aup->msg_enable;
 701}
 702
 703static const struct ethtool_ops au1000_ethtool_ops = {
 704        .get_drvinfo = au1000_get_drvinfo,
 705        .get_link = ethtool_op_get_link,
 706        .get_msglevel = au1000_get_msglevel,
 707        .set_msglevel = au1000_set_msglevel,
 708        .get_link_ksettings = phy_ethtool_get_link_ksettings,
 709        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 710};
 711
 712
 713/*
 714 * Initialize the interface.
 715 *
 716 * When the device powers up, the clocks are disabled and the
 717 * mac is in reset state.  When the interface is closed, we
 718 * do the same -- reset the device and disable the clocks to
 719 * conserve power. Thus, whenever au1000_init() is called,
 720 * the device should already be in reset state.
 721 */
 722static int au1000_init(struct net_device *dev)
 723{
 724        struct au1000_private *aup = netdev_priv(dev);
 725        unsigned long flags;
 726        int i;
 727        u32 control;
 728
 729        netif_dbg(aup, hw, dev, "au1000_init\n");
 730
 731        /* bring the device out of reset */
 732        au1000_enable_mac(dev, 1);
 733
 734        spin_lock_irqsave(&aup->lock, flags);
 735
 736        writel(0, &aup->mac->control);
 737        aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
 738        aup->tx_tail = aup->tx_head;
 739        aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
 740
 741        writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
 742                                        &aup->mac->mac_addr_high);
 743        writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
 744                dev->dev_addr[1]<<8 | dev->dev_addr[0],
 745                                        &aup->mac->mac_addr_low);
 746
 747
 748        for (i = 0; i < NUM_RX_DMA; i++)
 749                aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
 750
 751        wmb(); /* drain writebuffer */
 752
 753        control = MAC_RX_ENABLE | MAC_TX_ENABLE;
 754#ifndef CONFIG_CPU_LITTLE_ENDIAN
 755        control |= MAC_BIG_ENDIAN;
 756#endif
 757        if (dev->phydev) {
 758                if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
 759                        control |= MAC_FULL_DUPLEX;
 760                else
 761                        control |= MAC_DISABLE_RX_OWN;
 762        } else { /* PHY-less op, assume full-duplex */
 763                control |= MAC_FULL_DUPLEX;
 764        }
 765
 766        writel(control, &aup->mac->control);
 767        writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
 768        wmb(); /* drain writebuffer */
 769
 770        spin_unlock_irqrestore(&aup->lock, flags);
 771        return 0;
 772}
 773
 774static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
 775{
 776        struct net_device_stats *ps = &dev->stats;
 777
 778        ps->rx_packets++;
 779        if (status & RX_MCAST_FRAME)
 780                ps->multicast++;
 781
 782        if (status & RX_ERROR) {
 783                ps->rx_errors++;
 784                if (status & RX_MISSED_FRAME)
 785                        ps->rx_missed_errors++;
 786                if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
 787                        ps->rx_length_errors++;
 788                if (status & RX_CRC_ERROR)
 789                        ps->rx_crc_errors++;
 790                if (status & RX_COLL)
 791                        ps->collisions++;
 792        } else
 793                ps->rx_bytes += status & RX_FRAME_LEN_MASK;
 794
 795}
 796
 797/*
 798 * Au1000 receive routine.
 799 */
 800static int au1000_rx(struct net_device *dev)
 801{
 802        struct au1000_private *aup = netdev_priv(dev);
 803        struct sk_buff *skb;
 804        struct rx_dma *prxd;
 805        u32 buff_stat, status;
 806        struct db_dest *pDB;
 807        u32     frmlen;
 808
 809        netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
 810
 811        prxd = aup->rx_dma_ring[aup->rx_head];
 812        buff_stat = prxd->buff_stat;
 813        while (buff_stat & RX_T_DONE)  {
 814                status = prxd->status;
 815                pDB = aup->rx_db_inuse[aup->rx_head];
 816                au1000_update_rx_stats(dev, status);
 817                if (!(status & RX_ERROR))  {
 818
 819                        /* good frame */
 820                        frmlen = (status & RX_FRAME_LEN_MASK);
 821                        frmlen -= 4; /* Remove FCS */
 822                        skb = netdev_alloc_skb(dev, frmlen + 2);
 823                        if (skb == NULL) {
 824                                dev->stats.rx_dropped++;
 825                                continue;
 826                        }
 827                        skb_reserve(skb, 2);    /* 16 byte IP header align */
 828                        skb_copy_to_linear_data(skb,
 829                                (unsigned char *)pDB->vaddr, frmlen);
 830                        skb_put(skb, frmlen);
 831                        skb->protocol = eth_type_trans(skb, dev);
 832                        netif_rx(skb);  /* pass the packet to upper layers */
 833                } else {
 834                        if (au1000_debug > 4) {
 835                                pr_err("rx_error(s):");
 836                                if (status & RX_MISSED_FRAME)
 837                                        pr_cont(" miss");
 838                                if (status & RX_WDOG_TIMER)
 839                                        pr_cont(" wdog");
 840                                if (status & RX_RUNT)
 841                                        pr_cont(" runt");
 842                                if (status & RX_OVERLEN)
 843                                        pr_cont(" overlen");
 844                                if (status & RX_COLL)
 845                                        pr_cont(" coll");
 846                                if (status & RX_MII_ERROR)
 847                                        pr_cont(" mii error");
 848                                if (status & RX_CRC_ERROR)
 849                                        pr_cont(" crc error");
 850                                if (status & RX_LEN_ERROR)
 851                                        pr_cont(" len error");
 852                                if (status & RX_U_CNTRL_FRAME)
 853                                        pr_cont(" u control frame");
 854                                pr_cont("\n");
 855                        }
 856                }
 857                prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
 858                aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
 859                wmb(); /* drain writebuffer */
 860
 861                /* next descriptor */
 862                prxd = aup->rx_dma_ring[aup->rx_head];
 863                buff_stat = prxd->buff_stat;
 864        }
 865        return 0;
 866}
 867
 868static void au1000_update_tx_stats(struct net_device *dev, u32 status)
 869{
 870        struct net_device_stats *ps = &dev->stats;
 871
 872        if (status & TX_FRAME_ABORTED) {
 873                if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
 874                        if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
 875                                /* any other tx errors are only valid
 876                                 * in half duplex mode
 877                                 */
 878                                ps->tx_errors++;
 879                                ps->tx_aborted_errors++;
 880                        }
 881                } else {
 882                        ps->tx_errors++;
 883                        ps->tx_aborted_errors++;
 884                        if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
 885                                ps->tx_carrier_errors++;
 886                }
 887        }
 888}
 889
 890/*
 891 * Called from the interrupt service routine to acknowledge
 892 * the TX DONE bits.  This is a must if the irq is setup as
 893 * edge triggered.
 894 */
 895static void au1000_tx_ack(struct net_device *dev)
 896{
 897        struct au1000_private *aup = netdev_priv(dev);
 898        struct tx_dma *ptxd;
 899
 900        ptxd = aup->tx_dma_ring[aup->tx_tail];
 901
 902        while (ptxd->buff_stat & TX_T_DONE) {
 903                au1000_update_tx_stats(dev, ptxd->status);
 904                ptxd->buff_stat &= ~TX_T_DONE;
 905                ptxd->len = 0;
 906                wmb(); /* drain writebuffer */
 907
 908                aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
 909                ptxd = aup->tx_dma_ring[aup->tx_tail];
 910
 911                if (aup->tx_full) {
 912                        aup->tx_full = 0;
 913                        netif_wake_queue(dev);
 914                }
 915        }
 916}
 917
 918/*
 919 * Au1000 interrupt service routine.
 920 */
 921static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 922{
 923        struct net_device *dev = dev_id;
 924
 925        /* Handle RX interrupts first to minimize chance of overrun */
 926
 927        au1000_rx(dev);
 928        au1000_tx_ack(dev);
 929        return IRQ_RETVAL(1);
 930}
 931
 932static int au1000_open(struct net_device *dev)
 933{
 934        int retval;
 935        struct au1000_private *aup = netdev_priv(dev);
 936
 937        netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
 938
 939        retval = request_irq(dev->irq, au1000_interrupt, 0,
 940                                        dev->name, dev);
 941        if (retval) {
 942                netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
 943                return retval;
 944        }
 945
 946        retval = au1000_init(dev);
 947        if (retval) {
 948                netdev_err(dev, "error in au1000_init\n");
 949                free_irq(dev->irq, dev);
 950                return retval;
 951        }
 952
 953        if (dev->phydev) {
 954                /* cause the PHY state machine to schedule a link state check */
 955                dev->phydev->state = PHY_CHANGELINK;
 956                phy_start(dev->phydev);
 957        }
 958
 959        netif_start_queue(dev);
 960
 961        netif_dbg(aup, drv, dev, "open: Initialization done.\n");
 962
 963        return 0;
 964}
 965
 966static int au1000_close(struct net_device *dev)
 967{
 968        unsigned long flags;
 969        struct au1000_private *const aup = netdev_priv(dev);
 970
 971        netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
 972
 973        if (dev->phydev)
 974                phy_stop(dev->phydev);
 975
 976        spin_lock_irqsave(&aup->lock, flags);
 977
 978        au1000_reset_mac_unlocked(dev);
 979
 980        /* stop the device */
 981        netif_stop_queue(dev);
 982
 983        /* disable the interrupt */
 984        free_irq(dev->irq, dev);
 985        spin_unlock_irqrestore(&aup->lock, flags);
 986
 987        return 0;
 988}
 989
 990/*
 991 * Au1000 transmit routine.
 992 */
 993static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 994{
 995        struct au1000_private *aup = netdev_priv(dev);
 996        struct net_device_stats *ps = &dev->stats;
 997        struct tx_dma *ptxd;
 998        u32 buff_stat;
 999        struct db_dest *pDB;
1000        int i;
1001
1002        netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
1003                                (unsigned)aup, skb->len,
1004                                skb->data, aup->tx_head);
1005
1006        ptxd = aup->tx_dma_ring[aup->tx_head];
1007        buff_stat = ptxd->buff_stat;
1008        if (buff_stat & TX_DMA_ENABLE) {
1009                /* We've wrapped around and the transmitter is still busy */
1010                netif_stop_queue(dev);
1011                aup->tx_full = 1;
1012                return NETDEV_TX_BUSY;
1013        } else if (buff_stat & TX_T_DONE) {
1014                au1000_update_tx_stats(dev, ptxd->status);
1015                ptxd->len = 0;
1016        }
1017
1018        if (aup->tx_full) {
1019                aup->tx_full = 0;
1020                netif_wake_queue(dev);
1021        }
1022
1023        pDB = aup->tx_db_inuse[aup->tx_head];
1024        skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
1025        if (skb->len < ETH_ZLEN) {
1026                for (i = skb->len; i < ETH_ZLEN; i++)
1027                        ((char *)pDB->vaddr)[i] = 0;
1028
1029                ptxd->len = ETH_ZLEN;
1030        } else
1031                ptxd->len = skb->len;
1032
1033        ps->tx_packets++;
1034        ps->tx_bytes += ptxd->len;
1035
1036        ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1037        wmb(); /* drain writebuffer */
1038        dev_kfree_skb(skb);
1039        aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1040        return NETDEV_TX_OK;
1041}
1042
1043/*
1044 * The Tx ring has been full longer than the watchdog timeout
 1045 * value, so assume the transmitter is hung and reset the MAC.
1046 */
1047static void au1000_tx_timeout(struct net_device *dev)
1048{
1049        netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
1050        au1000_reset_mac(dev);
1051        au1000_init(dev);
1052        netif_trans_update(dev); /* prevent tx timeout */
1053        netif_wake_queue(dev);
1054}
1055
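     /*
      * ndo_set_rx_mode: program the receive filter from dev->flags and
      * the multicast list -- promiscuous mode, pass-all-multicast, or
      * a 64-bit CRC-based multicast hash in multi_hash_high/low.
      */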
1056static void au1000_multicast_list(struct net_device *dev)
1057{
1058        struct au1000_private *aup = netdev_priv(dev);
1059        u32 reg;
1060
1061        netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
1062        reg = readl(&aup->mac->control);
1063        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1064                reg |= MAC_PROMISCUOUS;
1065        } else if ((dev->flags & IFF_ALLMULTI)  ||
1066                           netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
1067                reg |= MAC_PASS_ALL_MULTI;
1068                reg &= ~MAC_PROMISCUOUS;
1069                netdev_info(dev, "Pass all multicast\n");
1070        } else {
1071                struct netdev_hw_addr *ha;
1072                u32 mc_filter[2];       /* Multicast hash filter */
1073
1074                mc_filter[1] = mc_filter[0] = 0;
1075                netdev_for_each_mc_addr(ha, dev)
1076                        set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
1077                                        (long *)mc_filter);
1078                writel(mc_filter[1], &aup->mac->multi_hash_high);
1079                writel(mc_filter[0], &aup->mac->multi_hash_low);
1080                reg &= ~MAC_PROMISCUOUS;
1081                reg |= MAC_HASH_MODE;
1082        }
1083        writel(reg, &aup->mac->control);
1084}
1085
1086static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1087{
1088        if (!netif_running(dev))
1089                return -EINVAL;
1090
1091        if (!dev->phydev)
1092                return -EINVAL; /* PHY not controllable */
1093
1094        return phy_mii_ioctl(dev->phydev, rq, cmd);
1095}
1096
1097static const struct net_device_ops au1000_netdev_ops = {
1098        .ndo_open               = au1000_open,
1099        .ndo_stop               = au1000_close,
1100        .ndo_start_xmit         = au1000_tx,
1101        .ndo_set_rx_mode        = au1000_multicast_list,
1102        .ndo_do_ioctl           = au1000_ioctl,
1103        .ndo_tx_timeout         = au1000_tx_timeout,
1104        .ndo_set_mac_address    = eth_mac_addr,
1105        .ndo_validate_addr      = eth_validate_addr,
1106};
1107
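     /*
      * Probe: map the MAC, MAC-enable and MACDMA register ranges,
      * allocate the DMA data buffers, register the MDIO bus, find a
      * PHY via au1000_mii_probe(), attach buffers to the four RX/TX
      * descriptors and finally register the net_device.
      */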
1108static int au1000_probe(struct platform_device *pdev)
1109{
1110        struct au1000_private *aup = NULL;
1111        struct au1000_eth_platform_data *pd;
1112        struct net_device *dev = NULL;
1113        struct db_dest *pDB, *pDBfree;
1114        int irq, i, err = 0;
1115        struct resource *base, *macen, *macdma;
1116
1117        base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1118        if (!base) {
1119                dev_err(&pdev->dev, "failed to retrieve base register\n");
1120                err = -ENODEV;
1121                goto out;
1122        }
1123
1124        macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1125        if (!macen) {
1126                dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1127                err = -ENODEV;
1128                goto out;
1129        }
1130
1131        irq = platform_get_irq(pdev, 0);
1132        if (irq < 0) {
1133                dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1134                err = -ENODEV;
1135                goto out;
1136        }
1137
1138        macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1139        if (!macdma) {
1140                dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1141                err = -ENODEV;
1142                goto out;
1143        }
1144
1145        if (!request_mem_region(base->start, resource_size(base),
1146                                                        pdev->name)) {
1147                dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1148                err = -ENXIO;
1149                goto out;
1150        }
1151
1152        if (!request_mem_region(macen->start, resource_size(macen),
1153                                                        pdev->name)) {
1154                dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1155                err = -ENXIO;
1156                goto err_request;
1157        }
1158
1159        if (!request_mem_region(macdma->start, resource_size(macdma),
1160                                                        pdev->name)) {
1161                dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1162                err = -ENXIO;
1163                goto err_macdma;
1164        }
1165
1166        dev = alloc_etherdev(sizeof(struct au1000_private));
1167        if (!dev) {
1168                err = -ENOMEM;
1169                goto err_alloc;
1170        }
1171
1172        SET_NETDEV_DEV(dev, &pdev->dev);
1173        platform_set_drvdata(pdev, dev);
1174        aup = netdev_priv(dev);
1175
1176        spin_lock_init(&aup->lock);
1177        aup->msg_enable = (au1000_debug < 4 ?
1178                                AU1000_DEF_MSG_ENABLE : au1000_debug);
1179
1180        /* Allocate the data buffers
1181         * Snooping works fine with eth on all au1xxx
1182         */
1183        aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1184                                                (NUM_TX_BUFFS + NUM_RX_BUFFS),
1185                                                &aup->dma_addr, 0);
1186        if (!aup->vaddr) {
1187                dev_err(&pdev->dev, "failed to allocate data buffers\n");
1188                err = -ENOMEM;
1189                goto err_vaddr;
1190        }
1191
1192        /* aup->mac is the base address of the MAC's registers */
1193        aup->mac = (struct mac_reg *)
1194                        ioremap_nocache(base->start, resource_size(base));
1195        if (!aup->mac) {
1196                dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1197                err = -ENXIO;
1198                goto err_remap1;
1199        }
1200
1201        /* Setup some variables for quick register address access */
1202        aup->enable = (u32 *)ioremap_nocache(macen->start,
1203                                                resource_size(macen));
1204        if (!aup->enable) {
1205                dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1206                err = -ENXIO;
1207                goto err_remap2;
1208        }
1209        aup->mac_id = pdev->id;
1210
1211        aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1212        if (!aup->macdma) {
1213                dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1214                err = -ENXIO;
1215                goto err_remap3;
1216        }
1217
1218        au1000_setup_hw_rings(aup, aup->macdma);
1219
1220        writel(0, aup->enable);
1221        aup->mac_enabled = 0;
1222
1223        pd = dev_get_platdata(&pdev->dev);
1224        if (!pd) {
1225                dev_info(&pdev->dev, "no platform_data passed,"
1226                                        " PHY search on MAC0\n");
1227                aup->phy1_search_mac0 = 1;
1228        } else {
1229                if (is_valid_ether_addr(pd->mac)) {
1230                        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
1231                } else {
 1232                        /* Set a random MAC since platform_data did not provide a valid one. */
1233                        eth_hw_addr_random(dev);
1234                }
1235
1236                aup->phy_static_config = pd->phy_static_config;
1237                aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1238                aup->phy1_search_mac0 = pd->phy1_search_mac0;
1239                aup->phy_addr = pd->phy_addr;
1240                aup->phy_busid = pd->phy_busid;
1241                aup->phy_irq = pd->phy_irq;
1242        }
1243
1244        if (aup->phy_busid > 0) {
 1245                dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
1246                err = -ENODEV;
1247                goto err_mdiobus_alloc;
1248        }
1249
1250        aup->mii_bus = mdiobus_alloc();
1251        if (aup->mii_bus == NULL) {
1252                dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1253                err = -ENOMEM;
1254                goto err_mdiobus_alloc;
1255        }
1256
1257        aup->mii_bus->priv = dev;
1258        aup->mii_bus->read = au1000_mdiobus_read;
1259        aup->mii_bus->write = au1000_mdiobus_write;
1260        aup->mii_bus->reset = au1000_mdiobus_reset;
1261        aup->mii_bus->name = "au1000_eth_mii";
1262        snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1263                pdev->name, aup->mac_id);
1264
1265        /* if known, set corresponding PHY IRQs */
1266        if (aup->phy_static_config)
1267                if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1268                        aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1269
1270        err = mdiobus_register(aup->mii_bus);
1271        if (err) {
1272                dev_err(&pdev->dev, "failed to register MDIO bus\n");
1273                goto err_mdiobus_reg;
1274        }
1275
1276        err = au1000_mii_probe(dev);
1277        if (err != 0)
1278                goto err_out;
1279
1280        pDBfree = NULL;
1281        /* setup the data buffer descriptors and attach a buffer to each one */
1282        pDB = aup->db;
1283        for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1284                pDB->pnext = pDBfree;
1285                pDBfree = pDB;
1286                pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1287                pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1288                pDB++;
1289        }
1290        aup->pDBfree = pDBfree;
1291
1292        err = -ENODEV;
1293        for (i = 0; i < NUM_RX_DMA; i++) {
1294                pDB = au1000_GetFreeDB(aup);
1295                if (!pDB)
1296                        goto err_out;
1297
1298                aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1299                aup->rx_db_inuse[i] = pDB;
1300        }
1301
1302        err = -ENODEV;
1303        for (i = 0; i < NUM_TX_DMA; i++) {
1304                pDB = au1000_GetFreeDB(aup);
1305                if (!pDB)
1306                        goto err_out;
1307
1308                aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1309                aup->tx_dma_ring[i]->len = 0;
1310                aup->tx_db_inuse[i] = pDB;
1311        }
1312
1313        dev->base_addr = base->start;
1314        dev->irq = irq;
1315        dev->netdev_ops = &au1000_netdev_ops;
1316        dev->ethtool_ops = &au1000_ethtool_ops;
1317        dev->watchdog_timeo = ETH_TX_TIMEOUT;
1318
1319        /*
1320         * The boot code uses the ethernet controller, so reset it to start
1321         * fresh.  au1000_init() expects that the device is in reset state.
1322         */
1323        au1000_reset_mac(dev);
1324
1325        err = register_netdev(dev);
1326        if (err) {
1327                netdev_err(dev, "Cannot register net device, aborting.\n");
1328                goto err_out;
1329        }
1330
1331        netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1332                        (unsigned long)base->start, irq);
1333
1334        pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1335
1336        return 0;
1337
1338err_out:
1339        if (aup->mii_bus != NULL)
1340                mdiobus_unregister(aup->mii_bus);
1341
1342        /* here we should have a valid dev plus aup-> register addresses
1343         * so we can reset the mac properly.
1344         */
1345        au1000_reset_mac(dev);
1346
1347        for (i = 0; i < NUM_RX_DMA; i++) {
1348                if (aup->rx_db_inuse[i])
1349                        au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1350        }
1351        for (i = 0; i < NUM_TX_DMA; i++) {
1352                if (aup->tx_db_inuse[i])
1353                        au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1354        }
1355err_mdiobus_reg:
1356        mdiobus_free(aup->mii_bus);
1357err_mdiobus_alloc:
1358        iounmap(aup->macdma);
1359err_remap3:
1360        iounmap(aup->enable);
1361err_remap2:
1362        iounmap(aup->mac);
1363err_remap1:
1364        dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1365                             (void *)aup->vaddr, aup->dma_addr);
1366err_vaddr:
1367        free_netdev(dev);
1368err_alloc:
1369        release_mem_region(macdma->start, resource_size(macdma));
1370err_macdma:
1371        release_mem_region(macen->start, resource_size(macen));
1372err_request:
1373        release_mem_region(base->start, resource_size(base));
1374out:
1375        return err;
1376}
1377
1378static int au1000_remove(struct platform_device *pdev)
1379{
1380        struct net_device *dev = platform_get_drvdata(pdev);
1381        struct au1000_private *aup = netdev_priv(dev);
1382        int i;
1383        struct resource *base, *macen;
1384
1385        unregister_netdev(dev);
1386        mdiobus_unregister(aup->mii_bus);
1387        mdiobus_free(aup->mii_bus);
1388
1389        for (i = 0; i < NUM_RX_DMA; i++)
1390                if (aup->rx_db_inuse[i])
1391                        au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1392
1393        for (i = 0; i < NUM_TX_DMA; i++)
1394                if (aup->tx_db_inuse[i])
1395                        au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1396
1397        dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1398                        (NUM_TX_BUFFS + NUM_RX_BUFFS),
1399                        (void *)aup->vaddr, aup->dma_addr);
1400
1401        iounmap(aup->macdma);
1402        iounmap(aup->mac);
1403        iounmap(aup->enable);
1404
1405        base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1406        release_mem_region(base->start, resource_size(base));
1407
1408        base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1409        release_mem_region(base->start, resource_size(base));
1410
1411        macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1412        release_mem_region(macen->start, resource_size(macen));
1413
1414        free_netdev(dev);
1415
1416        return 0;
1417}
1418
1419static struct platform_driver au1000_eth_driver = {
1420        .probe  = au1000_probe,
1421        .remove = au1000_remove,
1422        .driver = {
1423                .name   = "au1000-eth",
1424        },
1425};
1426
1427module_platform_driver(au1000_eth_driver);
1428
1429MODULE_ALIAS("platform:au1000-eth");
1430