/* drivers/net/ethernet/lantiq_etop.c */
/*
 *   This program is free software; you can redistribute it and/or modify it
 *   under the terms of the GNU General Public License version 2 as published
 *   by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <asm/checksum.h>

#include <lantiq_soc.h>
#include <xway_dma.h>
#include <lantiq_platform.h>

#define LTQ_ETOP_MDIO		0x11804
#define MDIO_REQUEST		0x80000000
#define MDIO_READ		0x40000000
#define MDIO_ADDR_MASK		0x1f
#define MDIO_ADDR_OFFSET	0x15
#define MDIO_REG_MASK		0x1f
#define MDIO_REG_OFFSET		0x10
#define MDIO_VAL_MASK		0xffff

#define PPE32_CGEN		0x800
#define LQ_PPE32_ENET_MAC_CFG	0x1840

#define LTQ_ETOP_ENETS0		0x11850
#define LTQ_ETOP_MAC_DA0	0x1186C
#define LTQ_ETOP_MAC_DA1	0x11870
#define LTQ_ETOP_CFG		0x16020
#define LTQ_ETOP_IGPLEN		0x16080

#define MAX_DMA_CHAN		0x8
#define MAX_DMA_CRC_LEN		0x4
#define MAX_DMA_DATA_LEN	0x600

#define ETOP_FTCU		BIT(28)
#define ETOP_MII_MASK		0xf
#define ETOP_MII_NORMAL		0xd
#define ETOP_MII_REVERSE	0xe
#define ETOP_PLEN_UNDER		0x40
#define ETOP_CGEN		0x800

/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL	1
#define LTQ_ETOP_RX_CHANNEL	6
#define IS_TX(x)		((x) == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x)		((x) == LTQ_ETOP_RX_CHANNEL)

#define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z)	\
		ltq_w32_mask(x, y, ltq_etop_membase + (z))

#define DRV_VERSION	"1.0"

static void __iomem *ltq_etop_membase;

struct ltq_etop_chan {
	int idx;
	int tx_free;
	struct net_device *netdev;
	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];
};

struct ltq_etop_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	struct ltq_eth_data *pldata;
	struct resource *res;

	struct mii_bus *mii_bus;

	struct ltq_etop_chan ch[MAX_DMA_CHAN];
	int tx_free[MAX_DMA_CHAN >> 1];

	spinlock_t lock;
};

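/*
 * allocate a fresh rx skb for the descriptor at ch->dma.desc and hand
 * the descriptor back to the DMA engine
 */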
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);

	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
		DMA_FROM_DEVICE);
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(ch->skb[ch->dma.desc]->data);
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		MAX_DMA_DATA_LEN;
	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
	return 0;
}

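/*
 * pass a completed rx buffer up the stack and refill the descriptor
 * with a new skb
 */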
static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			"failed to allocate new rx buffer, stopping DMA\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}

static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
				struct ltq_etop_chan, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;
		ltq_etop_hw_receive(ch);
		work_done++;
	}
	if (work_done < budget) {
		napi_complete_done(&ch->napi, work_done);
		ltq_dma_ack_irq(&ch->dma);
	}
	return work_done;
}

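/*
 * reclaim transmitted skbs from the tx ring and restart the queue once
 * there is room again
 */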
static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	while ((ch->dma.desc_base[ch->tx_free].ctl &
			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
			sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);
	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}

static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
	struct ltq_etop_priv *priv = _priv;
	int ch = irq - LTQ_DMA_CH0_INT;

	napi_schedule(&priv->ch[ch].napi);
	return IRQ_HANDLED;
}

static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		int desc;

		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
			dev_kfree_skb_any(ch->skb[desc]);
	}
}

static void
ltq_etop_hw_exit(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_disable(PMU_PPE);
	for (i = 0; i < MAX_DMA_CHAN; i++)
		if (IS_TX(i) || IS_RX(i))
			ltq_etop_free_channel(dev, &priv->ch[i]);
}

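/*
 * power up the PPE, select the MII mode, enable hardware crc generation
 * and set up the TX/RX DMA channels
 */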
static int
ltq_etop_hw_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	ltq_pmu_enable(PMU_PPE);

	switch (priv->pldata->mii_mode) {
	case PHY_INTERFACE_MODE_RMII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
		break;

	case PHY_INTERFACE_MODE_MII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
		break;

	default:
		netdev_err(dev, "unknown mii mode %d\n",
			priv->pldata->mii_mode);
		return -ENOTSUPP;
	}

	/* enable crc generation */
	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);

	ltq_dma_init_port(DMA_PORT_ETOP);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->idx = ch->dma.nr = i;

		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
			request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
					ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;
			request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
		}
		ch->dma.irq = irq;
	}
	return 0;
}

static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops ltq_etop_ethtool_ops = {
	.get_drvinfo = ltq_etop_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

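/*
 * MDIO accessors: the PHY address and register are encoded into the MDIO
 * register together with the REQUEST bit (plus READ for reads); the
 * controller clears REQUEST once the transfer has completed
 */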
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
	u32 val = MDIO_REQUEST |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
		phy_data;

	/* wait until any previous transfer has finished */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	return 0;
}

static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	u32 val = MDIO_REQUEST | MDIO_READ |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);

	/* wait until any previous transfer has finished */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	/* wait for the read to complete before fetching the result */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
	return val;
}

static void
ltq_etop_mdio_link(struct net_device *dev)
{
	/* nothing to do */
}

static int
ltq_etop_mdio_probe(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev),
			     &ltq_etop_mdio_link, priv->pldata->mii_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phy_set_max_speed(phydev, SPEED_100);

	phy_attached_info(phydev);

	return 0;
}

static int
ltq_etop_mdio_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int err;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		netdev_err(dev, "failed to allocate mii bus\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->mii_bus->priv = dev;
	priv->mii_bus->read = ltq_etop_mdio_rd;
	priv->mii_bus->write = ltq_etop_mdio_wr;
	priv->mii_bus->name = "ltq_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		priv->pdev->name, priv->pdev->id);
	if (mdiobus_register(priv->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdiobus;
	}

	if (ltq_etop_mdio_probe(dev)) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(priv->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(priv->mii_bus);
err_out:
	return err;
}

static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	phy_disconnect(dev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}

static int
ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_TX(i) && !IS_RX(i))
			continue;
		ltq_dma_open(&ch->dma);
		napi_enable(&ch->napi);
	}
	phy_start(dev->phydev);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int
ltq_etop_stop(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	netif_tx_stop_all_queues(dev);
	phy_stop(dev->phydev);
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		if (!IS_RX(i) && !IS_TX(i))
			continue;
		napi_disable(&ch->napi);
		ltq_dma_close(&ch->dma);
	}
	return 0;
}

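/*
 * queue an skb for transmission: map it, hand the descriptor to the DMA
 * engine and stop the tx queue when the ring is full
 */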
static int
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len;
	unsigned long flags;
	u32 byte_offset;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		dev_kfree_skb_any(skb);
		netdev_err(dev, "tx ring full\n");
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = CPHYSADDR(skb->data) % 16;
	ch->skb[ch->dma.desc] = skb;

	netif_trans_update(dev);

	spin_lock_irqsave(&priv->lock, flags);
	desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev,
				skb->data, len, DMA_TO_DEVICE)) - byte_offset;
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}

static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	unsigned long flags;

	dev->mtu = new_mtu;

	spin_lock_irqsave(&priv->lock, flags);
	ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* TODO: mii-tool reports "No MII transceiver present!." ?! */
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);

	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/* store the mac for the unicast filter */
		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
		ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
			LTQ_ETOP_MAC_DA1);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}

static void
ltq_etop_set_multicast_list(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* ensure that the unicast filter is not enabled in promiscuous mode */
	spin_lock_irqsave(&priv->lock, flags);
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
		ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
	else
		ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int
ltq_etop_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct sockaddr mac;
	int err;
	bool random_mac = false;

	dev->watchdog_timeo = 10 * HZ;
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	ltq_etop_change_mtu(dev, 1500);

	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
	if (!is_valid_ether_addr(mac.sa_data)) {
		pr_warn("etop: invalid MAC, using random\n");
		eth_random_addr(mac.sa_data);
		random_mac = true;
	}

	err = ltq_etop_set_mac_address(dev, &mac);
	if (err)
		goto err_netdev;

	/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
	if (random_mac)
		dev->addr_assign_type = NET_ADDR_RANDOM;

	ltq_etop_set_multicast_list(dev);
	err = ltq_etop_mdio_init(dev);
	if (err)
		goto err_netdev;
	return 0;

err_netdev:
	unregister_netdev(dev);
	free_netdev(dev);
err_hw:
	ltq_etop_hw_exit(dev);
	return err;
}

static void
ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	int err;

	ltq_etop_hw_exit(dev);
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	netif_trans_update(dev);
	netif_wake_queue(dev);
	return;

err_hw:
	ltq_etop_hw_exit(dev);
	netdev_err(dev, "failed to restart etop after TX timeout\n");
}

static const struct net_device_ops ltq_eth_netdev_ops = {
	.ndo_open = ltq_etop_open,
	.ndo_stop = ltq_etop_stop,
	.ndo_start_xmit = ltq_etop_tx,
	.ndo_change_mtu = ltq_etop_change_mtu,
	.ndo_do_ioctl = ltq_etop_ioctl,
	.ndo_set_mac_address = ltq_etop_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
	.ndo_select_queue = dev_pick_tx_zero,
	.ndo_init = ltq_etop_init,
	.ndo_tx_timeout = ltq_etop_tx_timeout,
};

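/*
 * map the ETOP register window, allocate the multiqueue netdev, register
 * the NAPI handlers for the TX/RX channels and register the device
 */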
static int __init
ltq_etop_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ltq_etop_priv *priv;
	struct resource *res;
	int err;
	int i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get etop resource\n");
		err = -ENOENT;
		goto err_out;
	}

	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request etop resource\n");
		err = -EBUSY;
		goto err_out;
	}

	ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
		res->start, resource_size(res));
	if (!ltq_etop_membase) {
		dev_err(&pdev->dev, "failed to remap etop engine %d\n",
			pdev->id);
		err = -ENOMEM;
		goto err_out;
	}

	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	strcpy(dev->name, "eth%d");
	dev->netdev_ops = &ltq_eth_netdev_ops;
	dev->ethtool_ops = &ltq_etop_ethtool_ops;
	priv = netdev_priv(dev);
	priv->res = res;
	priv->pdev = pdev;
	priv->pldata = dev_get_platdata(&pdev->dev);
	priv->netdev = dev;
	spin_lock_init(&priv->lock);
	SET_NETDEV_DEV(dev, &pdev->dev);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		if (IS_TX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_tx, 8);
		else if (IS_RX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_rx, 32);
		priv->ch[i].netdev = dev;
	}

	err = register_netdev(dev);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, dev);
	return 0;

err_free:
	free_netdev(dev);
err_out:
	return err;
}

static int
ltq_etop_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		netif_tx_stop_all_queues(dev);
		ltq_etop_hw_exit(dev);
		ltq_etop_mdio_cleanup(dev);
		unregister_netdev(dev);
	}
	return 0;
}

static struct platform_driver ltq_mii_driver = {
	.remove = ltq_etop_remove,
	.driver = {
		.name = "ltq_etop",
	},
};

int __init
init_ltq_etop(void)
{
	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);

	if (ret)
		pr_err("ltq_etop: Error registering platform driver!\n");
	return ret;
}

static void __exit
exit_ltq_etop(void)
{
	platform_driver_unregister(&ltq_mii_driver);
}

module_init(init_ltq_etop);
module_exit(exit_ltq_etop);

MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC ETOP");
MODULE_LICENSE("GPL");