linux/drivers/net/bcm63xx_enet.c
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
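
/* Received frames shorter than copybreak bytes are copied into a small
 * freshly allocated skb, so the full-sized rx buffer can stay dma-mapped
 * and be handed straight back to the hardware (see
 * bcm_enet_receive_queue below). */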

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

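/*
 * An MDIO transaction is described by a single 32-bit word written to
 * ENET_MIIDATA_REG: the opcode (read/write), PHY address, register
 * number, turnaround bits (0x2) and, for writes, the 16-bit data are
 * packed with the shifts used by the two callbacks below.
 */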
/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
 172
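/*
 * Rx ring bookkeeping: rx_curr_desc is the next descriptor the
 * hardware will complete, rx_dirty_desc is the next descriptor to hand
 * back to the hardware, and rx_desc_count is the number of descriptors
 * currently owned by the hardware.
 */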
/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;

			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			/* since we're copying the data, we can align
			 * it properly */
			skb_reserve(nskb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		dev->last_rx = jiffies;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}

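/*
 * Tx ring bookkeeping: tx_curr_desc is the next descriptor to give to
 * the hardware, tx_dirty_desc is the next descriptor to reclaim once
 * the hardware has finished with it, and tx_desc_count is the number
 * of free descriptors.
 */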
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure the other fields of the descriptor are not
		 * read before we have checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			priv->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

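/*
 * NAPI flow: the dma interrupt handler masks the rx/tx channel
 * interrupts and schedules the poll function; the poll function acks
 * pending work, reclaims tx buffers and receives packets, then
 * re-enables the interrupts once both rings are quiet.
 */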
/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should not
	 * happen since we stop the queue before it gets full */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	priv->stats.tx_bytes += skb->len;
	priv->stats.tx_packets++;
	dev->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

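/*
 * The MAC matches destination addresses with four "perfect match"
 * register pairs: ENET_PML_REG holds the low four bytes of the
 * address, ENET_PMH_REG the high two bytes plus a valid bit. Entry 0
 * is reserved for the device's own address; the remaining three are
 * used for the multicast list.
 */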
/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct dev_mc_list *mc_list;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, the first one is used
	 * for our own mac address */
	if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	for (i = 0, mc_list = dev->mc_list;
	     (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
	     i++, mc_list = mc_list->next) {
		u8 *dmi_addr;
		u32 tmp;

		/* filter out non-ethernet addresses */
		if (mc_list->dmi_addrlen != 6)
			continue;

		/* update perfect match registers */
		dmi_addr = mc_list->dmi_addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
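
/*
 * Note that pause frame generation is driven by the rx dma channel:
 * the ENETDMA_CFG_FLOWCH bit arms it, and the low/high occupancy
 * thresholds programmed in bcm_enet_open() (1/3 and 2/3 of the rx
 * ring) decide when pause frames are actually emitted.
 */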

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if the remote advertises it (trust
	 * phylib to check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

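/*
 * bcm_enet_open() brings the interface up in the following order:
 * connect the PHY (when present), mask and request the mac and dma
 * irqs, allocate the descriptor rings and rx skbs, program ring
 * addresses and dma parameters, then enable the mac, the dma engine
 * and the interrupts before starting the tx queue.
 */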
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	/* only disconnect when we actually attached a PHY above */
	if (priv->has_phy)
		phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}
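
/*
 * The shutdown handshake is the same for the mac and for a dma
 * channel: request the stop, then poll until the hardware clears the
 * relevant bit, with a bounded busy-wait so a wedged block cannot hang
 * the caller.
 */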

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(val & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * core request to return device rx/tx stats
 */
static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return &priv->stats;
}

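/*
 * The ethtool stats table mixes two kinds of entries: software
 * counters kept in priv->stats (mib_reg == -1) and hardware MIB
 * counters identified by their register offset. GEN_STAT() expands
 * to the sizeof/offsetof pair used to locate each field inside
 * struct bcm_enet_priv.
 */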
/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", GEN_STAT(stats.rx_packets), -1 },
	{ "tx_packets", GEN_STAT(stats.tx_packets), -1 },
	{ "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
	{ "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
	{ "rx_errors", GEN_STAT(stats.rx_errors), -1 },
	{ "tx_errors", GEN_STAT(stats.tx_errors), -1 },
	{ "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
	{ "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_stats_count(struct net_device *netdev)
{
	return BCM_ENET_STATS_LEN;
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
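
/*
 * The hardware MIB counters are configured read-to-clear in
 * bcm_enet_hw_preinit(), so each pass above accumulates the delta
 * since the previous read into the wider software counters; this is
 * also why the unused registers must be read to clear the overflow
 * interrupt.
 */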

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		p = (char *)priv + s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported; it is
			 * actually possible, but the integrated PHY
			 * has a read-only asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_stats_count	= bcm_enet_get_stats_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account for FCS
	 * since it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}
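
/*
 * Worked example (assuming BCMENET_DMA_MAXBURST is 16, i.e. a 64-byte
 * alignment unit): for the default mtu of 1500, actual_mtu becomes
 * 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size is
 * ALIGN(1518 + ETH_FCS_LEN (4), 64) = ALIGN(1522, 64) = 1536 bytes.
 */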

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;

        if (netif_running(dev))
                return -EBUSY;

        ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
        if (ret)
                return ret;
        dev->mtu = new_mtu;
        return 0;
}
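
/*
 * Illustrative sketch (not part of the driver): changing the MTU from
 * userspace.  Since bcm_enet_change_mtu() returns -EBUSY on a running
 * interface, the sketch brings the device down first by clearing
 * IFF_UP; interface name and error handling are simplified
 * assumptions.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int example_set_mtu(const char *ifname, int mtu)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        /* bring the interface down, otherwise SIOCSIFMTU gets -EBUSY */
        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
                return -1;
        ifr.ifr_flags &= ~IFF_UP;
        if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
                return -1;

        ifr.ifr_mtu = mtu;
        return ioctl(fd, SIOCSIFMTU, &ifr);
}
#endif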

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
        u32 val;
        int limit;

        /* make sure mac is disabled */
        bcm_enet_disable_mac(priv);

        /* soft reset mac */
        val = ENET_CTL_SRESET_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        wmb();

        limit = 1000;
        do {
                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_SRESET_MASK))
                        break;
                udelay(1);
        } while (limit--);

        /* select correct mii interface */
        val = enet_readl(priv, ENET_CTL_REG);
        if (priv->use_external_mii)
                val |= ENET_CTL_EPHYSEL_MASK;
        else
                val &= ~ENET_CTL_EPHYSEL_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        /* turn on mdc clock */
        enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
                    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

        /* set mib counters to self-clear when read */
        val = enet_readl(priv, ENET_MIBCTL_REG);
        val |= ENET_MIBCTL_RDCLEAR_MASK;
        enet_writel(priv, val, ENET_MIBCTL_REG);
}
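
/*
 * A minimal refactoring sketch (not in the driver): the busy-wait on
 * ENET_CTL_SRESET_MASK above follows the same poll-with-timeout
 * pattern used elsewhere in this file; a shared helper could look
 * like this.  The helper name is hypothetical.
 */
#if 0
static int bcm_enet_wait_clear(struct bcm_enet_priv *priv, u32 reg,
                               u32 mask, int timeout_us)
{
        while (timeout_us-- > 0) {
                if (!(enet_readl(priv, reg) & mask))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
#endif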

static const struct net_device_ops bcm_enet_ops = {
        .ndo_open               = bcm_enet_open,
        .ndo_stop               = bcm_enet_stop,
        .ndo_start_xmit         = bcm_enet_start_xmit,
        .ndo_get_stats          = bcm_enet_get_stats,
        .ndo_set_mac_address    = bcm_enet_set_mac_address,
        .ndo_set_multicast_list = bcm_enet_set_multicast_list,
        .ndo_do_ioctl           = bcm_enet_ioctl,
        .ndo_change_mtu         = bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bcm_enet_netpoll,
#endif
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        struct bcm63xx_enet_platform_data *pd;
        struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
        struct mii_bus *bus;
        const char *clk_name;
        unsigned int iomem_size;
        int i, ret;

        /* stop if the shared driver failed; this assumes driver->probe
         * is called in the same order the devices were registered */
        if (!bcm_enet_shared_base)
                return -ENODEV;

        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
        if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
                return -ENODEV;

        ret = 0;
        dev = alloc_etherdev(sizeof(*priv));
        if (!dev)
                return -ENOMEM;
        priv = netdev_priv(dev);
        memset(priv, 0, sizeof(*priv));

        ret = compute_hw_mtu(priv, dev->mtu);
        if (ret)
                goto out;

        iomem_size = resource_size(res_mem);
        if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
                ret = -EBUSY;
                goto out;
        }

        priv->base = ioremap(res_mem->start, iomem_size);
        if (priv->base == NULL) {
                ret = -ENOMEM;
                goto out_release_mem;
        }
        dev->irq = priv->irq = res_irq->start;
        priv->irq_rx = res_irq_rx->start;
        priv->irq_tx = res_irq_tx->start;
        priv->mac_id = pdev->id;

        /* get rx & tx dma channel id for this mac */
        if (priv->mac_id == 0) {
                priv->rx_chan = 0;
                priv->tx_chan = 1;
                clk_name = "enet0";
        } else {
                priv->rx_chan = 2;
                priv->tx_chan = 3;
                clk_name = "enet1";
        }

        priv->mac_clk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(priv->mac_clk)) {
                ret = PTR_ERR(priv->mac_clk);
                goto out_unmap;
        }
        clk_enable(priv->mac_clk);

        /* initialize defaults and fetch platform data */
        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
        priv->tx_ring_size = BCMENET_DEF_TX_DESC;

        pd = pdev->dev.platform_data;
        if (pd) {
                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
                priv->has_phy = pd->has_phy;
                priv->phy_id = pd->phy_id;
                priv->has_phy_interrupt = pd->has_phy_interrupt;
                priv->phy_interrupt = pd->phy_interrupt;
                priv->use_external_mii = !pd->use_internal_phy;
                priv->pause_auto = pd->pause_auto;
                priv->pause_rx = pd->pause_rx;
                priv->pause_tx = pd->pause_tx;
                priv->force_duplex_full = pd->force_duplex_full;
                priv->force_speed_100 = pd->force_speed_100;
        }

        if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
                /* using internal PHY, enable clock */
                priv->phy_clk = clk_get(&pdev->dev, "ephy");
                if (IS_ERR(priv->phy_clk)) {
                        ret = PTR_ERR(priv->phy_clk);
                        priv->phy_clk = NULL;
                        goto out_put_clk_mac;
                }
                clk_enable(priv->phy_clk);
        }

        /* do minimal hardware init to be able to probe mii bus */
        bcm_enet_hw_preinit(priv);

        /* MII bus registration */
        if (priv->has_phy) {

                priv->mii_bus = mdiobus_alloc();
                if (!priv->mii_bus) {
                        ret = -ENOMEM;
                        goto out_uninit_hw;
                }

                bus = priv->mii_bus;
                bus->name = "bcm63xx_enet MII bus";
                bus->parent = &pdev->dev;
                bus->priv = priv;
                bus->read = bcm_enet_mdio_read_phylib;
                bus->write = bcm_enet_mdio_write_phylib;
                sprintf(bus->id, "%d", priv->mac_id);

                /* only probe the bus address we expect the PHY on:
                 * the mdio read operation returns 0 instead of 0xffff
                 * when no slave is present on the hardware */
                bus->phy_mask = ~(1 << priv->phy_id);

                bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
                if (!bus->irq) {
                        ret = -ENOMEM;
                        goto out_free_mdio;
                }

                if (priv->has_phy_interrupt)
                        bus->irq[priv->phy_id] = priv->phy_interrupt;
                else
                        bus->irq[priv->phy_id] = PHY_POLL;

                ret = mdiobus_register(bus);
                if (ret) {
                        dev_err(&pdev->dev, "unable to register mdio bus\n");
                        goto out_free_mdio;
                }
        } else {

                /* run platform code to initialize PHY device */
                if (pd && pd->mii_config &&
                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
                                   bcm_enet_mdio_write_mii)) {
                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
                        ret = -ENODEV;
                        goto out_uninit_hw;
                }
        }

        spin_lock_init(&priv->rx_lock);

        /* init rx timeout (used for oom) */
        init_timer(&priv->rx_timeout);
        priv->rx_timeout.function = bcm_enet_refill_rx_timer;
        priv->rx_timeout.data = (unsigned long)dev;

        /* init the mib update lock & work */
        mutex_init(&priv->mib_update_lock);
        INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

        /* zero mib counters */
        for (i = 0; i < ENET_MIB_REG_COUNT; i++)
                enet_writel(priv, 0, ENET_MIB_REG(i));

        /* register netdevice */
        dev->netdev_ops = &bcm_enet_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

        SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
        SET_NETDEV_DEV(dev, &pdev->dev);

        ret = register_netdev(dev);
        if (ret)
                goto out_unregister_mdio;

        netif_carrier_off(dev);
        platform_set_drvdata(pdev, dev);
        priv->pdev = pdev;
        priv->net_dev = dev;

        return 0;

out_unregister_mdio:
        if (priv->mii_bus)
                mdiobus_unregister(priv->mii_bus);

out_free_mdio:
        if (priv->mii_bus) {
                /* this label is also reached when mdiobus_register()
                 * fails, so the irq array must be freed here rather
                 * than above, or it would leak on that path */
                kfree(priv->mii_bus->irq);
                mdiobus_free(priv->mii_bus);
        }

out_uninit_hw:
        /* turn off mdc clock */
        enet_writel(priv, 0, ENET_MIISC_REG);
        if (priv->phy_clk) {
                clk_disable(priv->phy_clk);
                clk_put(priv->phy_clk);
        }

out_put_clk_mac:
        clk_disable(priv->mac_clk);
        clk_put(priv->mac_clk);

out_unmap:
        iounmap(priv->base);

out_release_mem:
        release_mem_region(res_mem->start, iomem_size);
out:
        free_netdev(dev);
        return ret;
}
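
/*
 * Illustrative board-code sketch (not part of this file): the
 * resources bcm_enet_probe() expects, one MEM region plus mac, rx dma
 * and tx dma IRQs, along with the platform data it copies.  The
 * shared DMA device must be registered before the MACs so its probe
 * runs first, matching the ordering assumption at the top of
 * bcm_enet_probe().  All addresses, IRQ numbers and platform_data
 * values here are hypothetical.
 */
#if 0
static struct resource enet_shared_res[] = {
        { .start = 0xfffe7000, .end = 0xfffe77ff, .flags = IORESOURCE_MEM },
};

static struct platform_device enet_shared_dev = {
        .name           = "bcm63xx_enet_shared",
        .id             = 0,
        .resource       = enet_shared_res,
        .num_resources  = ARRAY_SIZE(enet_shared_res),
};

static struct resource enet0_res[] = {
        { .start = 0xfffe6000, .end = 0xfffe6fff, .flags = IORESOURCE_MEM },
        { .start = 8,  .end = 8,  .flags = IORESOURCE_IRQ },    /* mac */
        { .start = 20, .end = 20, .flags = IORESOURCE_IRQ },    /* rx dma */
        { .start = 21, .end = 21, .flags = IORESOURCE_IRQ },    /* tx dma */
};

static struct bcm63xx_enet_platform_data enet0_pd = {
        .has_phy                = 1,
        .use_internal_phy       = 1,
        .phy_id                 = 1,
};

static struct platform_device enet0_dev = {
        .name           = "bcm63xx_enet",
        .id             = 0,
        .resource       = enet0_res,
        .num_resources  = ARRAY_SIZE(enet0_res),
        .dev            = { .platform_data = &enet0_pd, },
};

static void __init example_board_register_enet(void)
{
        /* shared device first; return values ignored in this sketch */
        platform_device_register(&enet_shared_dev);
        platform_device_register(&enet0_dev);
}
#endif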

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        struct resource *res;

        /* stop netdevice */
        dev = platform_get_drvdata(pdev);
        priv = netdev_priv(dev);
        unregister_netdev(dev);

        /* turn off mdc clock */
        enet_writel(priv, 0, ENET_MIISC_REG);

        if (priv->has_phy) {
                mdiobus_unregister(priv->mii_bus);
                kfree(priv->mii_bus->irq);
                mdiobus_free(priv->mii_bus);
        } else {
                struct bcm63xx_enet_platform_data *pd;

                pd = pdev->dev.platform_data;
                if (pd && pd->mii_config)
                        pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
                                       bcm_enet_mdio_write_mii);
        }

        /* release device resources */
        iounmap(priv->base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        /* disable hw block clocks */
        if (priv->phy_clk) {
                clk_disable(priv->phy_clk);
                clk_put(priv->phy_clk);
        }
        clk_disable(priv->mac_clk);
        clk_put(priv->mac_clk);

        platform_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return 0;
}

struct platform_driver bcm63xx_enet_driver = {
        .probe  = bcm_enet_probe,
        .remove = __devexit_p(bcm_enet_remove),
        .driver = {
                .name   = "bcm63xx_enet",
                .owner  = THIS_MODULE,
        },
};

/*
 * reserve & remap memory space shared between all macs
 */
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
        struct resource *res;
        unsigned int iomem_size;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        iomem_size = resource_size(res);
        if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
                return -EBUSY;

        bcm_enet_shared_base = ioremap(res->start, iomem_size);
        if (!bcm_enet_shared_base) {
                release_mem_region(res->start, iomem_size);
                return -ENOMEM;
        }
        return 0;
}

static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
        struct resource *res;

        iounmap(bcm_enet_shared_base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
        return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
        .probe  = bcm_enet_shared_probe,
        .remove = __devexit_p(bcm_enet_shared_remove),
        .driver = {
                .name   = "bcm63xx_enet_shared",
                .owner  = THIS_MODULE,
        },
};

/*
 * entry point
 */
static int __init bcm_enet_init(void)
{
        int ret;

        ret = platform_driver_register(&bcm63xx_enet_shared_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&bcm63xx_enet_driver);
        if (ret)
                platform_driver_unregister(&bcm63xx_enet_shared_driver);

        return ret;
}

static void __exit bcm_enet_exit(void)
{
        platform_driver_unregister(&bcm63xx_enet_driver);
        platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");