linux/drivers/net/bcm63xx_enet.c
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
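/* frames shorter than copybreak are copied into a freshly allocated
 * skb on receive, so the full-size rx buffer can be reused without
 * being remapped */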

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                   u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

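        /*
         * assemble a clause-22 MII read frame in the MIIDATA register:
         * register number, 0b10 turnaround, phy address and the read
         * opcode
         */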
        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                dma_addr_t p;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_skb[desc_idx]) {
                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;

                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           priv->rx_skb_size,
                                           DMA_FROM_DEVICE);
                        desc->address = p;
                }

                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= DMADESC_WRAP_MASK;
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
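                /* the buffer address must be visible to the hardware
                 * before ownership is handed over through len_stat */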
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = (struct net_device *)data;
        priv = netdev_priv(dev);

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan the ring beyond the number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status on
                 * each loop iteration */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;
                priv->rx_desc_count--;

                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
                if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                skb = priv->rx_skb[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= ETH_FCS_LEN;

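                /* small frame: copy it into a fresh skb and leave the
                 * original full-size rx buffer in place, so the
                 * descriptor can be rearmed without a remap */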
                if (len < copybreak) {
                        struct sk_buff *nskb;

                        nskb = netdev_alloc_skb_ip_align(dev, len);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                        skb = nskb;
                } else {
                        dma_unmap_single(&priv->pdev->dev, desc->address,
                                         priv->rx_skb_size, DMA_FROM_DEVICE);
                        priv->rx_skb[desc_idx] = NULL;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                netif_receive_skb(skb);

        } while (--budget > 0);

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev);

                /* kick rx dma */
                enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                                ENETDMA_CHANCFG_REG(priv->rx_chan));
        }

        return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        int released;

        priv = netdev_priv(dev);
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

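                /* the hardware reports fifo underflow in the
                 * descriptor status word; account it as a tx error */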
                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                dev_kfree_skb(skb);
                released++;
        }

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int tx_work_done, rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->tx_chan));

        /* reclaim sent skb */
        tx_work_done = bcm_enet_tx_reclaim(dev, 0);
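        /* tx reclaim above is not bounded by the napi budget; only the
         * rx work below is counted against it */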

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget || tx_work_done > 0) {
                /* rx/tx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packets in rx/tx queue, remove device from poll
         * queue */
        napi_complete(napi);

        /* restore rx/tx interrupt */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->tx_chan));

        return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

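        /* interrupts stay masked until bcm_enet_poll() has drained the
         * rings and re-enabled them */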
        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        int ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full; this should not
         * happen since we stop the queue before that's the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

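        /* the whole frame fits in one descriptor: flag it as both start
         * and end of packet and let the mac append the CRC */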
        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= DMADESC_ESOP_MASK |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= DMADESC_WRAP_MASK;
        }
        priv->tx_desc_count--;

        /* dma might already be polling, make sure we update desc
         * fields in the correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        /* kick tx dma */
        enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                        ENETDMA_CHANCFG_REG(priv->tx_chan));

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

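        /* the two remaining mac bytes go in the PMH register together
         * with the entry-valid bit */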
        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers left, the first one is used
         * for our own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;
                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = priv->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if the remote advertises it (trust phylib
         * to check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by the user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mac_id ? "1" : "0", priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phydev->supported &= (SUPPORTED_10baseT_Half |
                                      SUPPORTED_10baseT_Full |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_Autoneg |
                                      SUPPORTED_Pause |
                                      SUPPORTED_MII);
                phydev->advertising = phydev->supported;

                if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
                        phydev->advertising |= SUPPORTED_Pause;
                else
                        phydev->advertising &= ~SUPPORTED_Pause;

                dev_info(kdev, "attached PHY at address %d [%s]\n",
                         phydev->addr, phydev->drv->name);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
                priv->phydev = phydev;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
                          IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate rx ring %u\n", size);
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        memset(p, 0, size);
        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate tx ring\n");
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        memset(p, 0, size);
        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                dev_err(kdev, "cannot allocate tx skb queue\n");
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with skbs */
        priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                        ENETDMA_BUFALLOC_REG(priv->rx_chan));
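        /* writing FORCE with a count of zero should clear the
         * channel's buffer allocation counter before the initial
         * refill below */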

        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        enet_dma_writel(priv, priv->rx_desc_dma,
                        ENETDMA_RSTART_REG(priv->rx_chan));
        enet_dma_writel(priv, priv->tx_desc_dma,
                        ENETDMA_RSTART_REG(priv->tx_chan));

        /* clear remaining state ram for rx & tx channel */
        enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
                        ENETDMA_MAXBURST_REG(priv->rx_chan));
        enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
                        ENETDMA_MAXBURST_REG(priv->tx_chan));

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        val = priv->rx_ring_size / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
        val = (priv->rx_ring_size * 2) / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
                        ENETDMA_CHANCFG_REG(priv->rx_chan));

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IR_REG(priv->tx_chan));

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
                        ENETDMA_IRMASK_REG(priv->tx_chan));

        if (priv->has_phy)
                phy_start(priv->phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }
        kfree(priv->rx_skb);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        phy_disconnect(priv->phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

        limit = 1000;
        do {
                u32 val;

                val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
                if (!(val & ENETDMA_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int i;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(priv->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
        enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx skb ring */
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }

        /* free remaining allocated memory */
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy) {
                phy_disconnect(priv->phydev);
                priv->phydev = NULL;
        }

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
                     offsetof(struct net_device_stats, m)

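/*
 * each entry records a human readable name, the size and offset of the
 * backing field (in bcm_enet_priv or net_device_stats) and the hardware
 * mib register that feeds it (-1 for pure software stats)
 */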
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strncpy(drvinfo->driver, bcm_enet_driver_name,
                sizeof(drvinfo->driver));
        strncpy(drvinfo->version, bcm_enet_driver_version,
                sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
        drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                        int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        cmd->maxrxpkt = 0;
        cmd->maxtxpkt = 0;

        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_gset(priv->phydev, cmd);
        } else {
                cmd->autoneg = 0;
                cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
                cmd->duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                cmd->supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                cmd->advertising = 0;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_EXTERNAL;
        }
        return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_sset(priv->phydev, cmd);
        } else {

                if (cmd->autoneg ||
                    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
                    cmd->port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
                        /* asymmetric pause mode not supported; it is
                         * actually possible but the integrated PHY has
                         * a read-only asym_pause bit */
                        return -EINVAL;
                }
        } else {
                /* no pause autoneg on direct mii connection */
                if (ecmd->autoneg)
                        return -EINVAL;
        }

        priv->pause_auto = ecmd->autoneg;
        priv->pause_rx = ecmd->rx_pause;
        priv->pause_tx = ecmd->tx_pause;

        return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
        .get_strings            = bcm_enet_get_strings,
        .get_sset_count         = bcm_enet_get_sset_count,
        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
        .get_settings           = bcm_enet_get_settings,
        .set_settings           = bcm_enet_set_settings,
        .get_drvinfo            = bcm_enet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = bcm_enet_get_ringparam,
        .set_ringparam          = bcm_enet_set_ringparam,
        .get_pauseparam         = bcm_enet_get_pauseparam,
        .set_pauseparam         = bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_mii_ioctl(priv->phydev, rq, cmd);
        } else {
                struct mii_if_info mii;

                mii.dev = dev;
                mii.mdio_read = bcm_enet_mdio_read_mii;
                mii.mdio_write = bcm_enet_mdio_write_mii;
                mii.phy_id = 0;
                mii.phy_id_mask = 0x3f;
                mii.reg_num_mask = 0x1f;
                return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
        }
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
        int actual_mtu;

        actual_mtu = mtu;

        /* add ethernet header + vlan tag size */
        actual_mtu += VLAN_ETH_HLEN;

        if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
                return -EINVAL;

        /*
         * set the size above which the hardware flags an overflow in
         * the rx descriptor; note that this does not prevent reception
         * of bigger frames, they are simply split into multiple
         * buffers
         */
        priv->hw_mtu = actual_mtu;

        /*
         * align rx buffer size to dma burst len, and account for the
         * FCS since it's appended by the hardware
         */
        priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
                                  BCMENET_DMA_MAXBURST * 4);
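        /*
         * worked example, assuming BCMENET_DMA_MAXBURST is 16 32-bit
         * words: a 1500 byte mtu gives 1500 + 18 + 4 = 1522 bytes,
         * aligned up to the 64 byte burst boundary -> 1536 byte rx
         * buffers
         */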
1537        return 0;
1538}

/*
 * adjust mtu; rejected with -EBUSY while the device is running,
 * since the rx buffer size depends on it
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;

        if (netif_running(dev))
                return -EBUSY;

        ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
        if (ret)
                return ret;
        dev->mtu = new_mtu;
        return 0;
}
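
/*
 * Illustrative only: because of the check above, the interface must
 * be brought down before the mtu can be changed, e.g. ("eth0" and the
 * mtu value are made up):
 *
 *      ip link set eth0 down
 *      ip link set eth0 mtu 1400
 *      ip link set eth0 up
 */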

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
        u32 val;
        int limit;

        /* make sure mac is disabled */
        bcm_enet_disable_mac(priv);

        /* soft reset mac */
        val = ENET_CTL_SRESET_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        wmb();

        limit = 1000;
        do {
                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_SRESET_MASK))
                        break;
                udelay(1);
        } while (limit--);

        /* select correct mii interface */
        val = enet_readl(priv, ENET_CTL_REG);
        if (priv->use_external_mii)
                val |= ENET_CTL_EPHYSEL_MASK;
        else
                val &= ~ENET_CTL_EPHYSEL_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        /* turn on mdc clock */
        enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
                    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

        /* set mib counters to self-clear when read */
        val = enet_readl(priv, ENET_MIBCTL_REG);
        val |= ENET_MIBCTL_RDCLEAR_MASK;
        enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
        .ndo_open               = bcm_enet_open,
        .ndo_stop               = bcm_enet_stop,
        .ndo_start_xmit         = bcm_enet_start_xmit,
        .ndo_set_mac_address    = bcm_enet_set_mac_address,
        .ndo_set_multicast_list = bcm_enet_set_multicast_list,
        .ndo_do_ioctl           = bcm_enet_ioctl,
        .ndo_change_mtu         = bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bcm_enet_netpoll,
#endif
};
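
/*
 * Note: the netpoll hook above is only compiled in when
 * CONFIG_NET_POLL_CONTROLLER is set; it lets users such as netconsole
 * drive the rx path with interrupts disabled.
 */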

/*
 * allocate netdevice, request register memory and register device.
 */
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        struct bcm63xx_enet_platform_data *pd;
        struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
        struct mii_bus *bus;
        const char *clk_name;
        unsigned int iomem_size;
        int i, ret;

        /* bail out if the shared driver failed to map its io region;
         * platform drivers are probed in registration order and the
         * shared driver is registered first, so it has run by now */
        if (!bcm_enet_shared_base)
                return -ENODEV;

        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
        if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
                return -ENODEV;

        dev = alloc_etherdev(sizeof(*priv));
        if (!dev)
                return -ENOMEM;
        priv = netdev_priv(dev);

        ret = compute_hw_mtu(priv, dev->mtu);
        if (ret)
                goto out;

        iomem_size = resource_size(res_mem);
        if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
                ret = -EBUSY;
                goto out;
        }

        priv->base = ioremap(res_mem->start, iomem_size);
        if (priv->base == NULL) {
                ret = -ENOMEM;
                goto out_release_mem;
        }
        dev->irq = priv->irq = res_irq->start;
        priv->irq_rx = res_irq_rx->start;
        priv->irq_tx = res_irq_tx->start;
        priv->mac_id = pdev->id;

        /* get rx & tx dma channel id for this mac */
        if (priv->mac_id == 0) {
                priv->rx_chan = 0;
                priv->tx_chan = 1;
                clk_name = "enet0";
        } else {
                priv->rx_chan = 2;
                priv->tx_chan = 3;
                clk_name = "enet1";
        }

        priv->mac_clk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(priv->mac_clk)) {
                ret = PTR_ERR(priv->mac_clk);
                goto out_unmap;
        }
        clk_enable(priv->mac_clk);

        /* initialize defaults, then override them with platform data */
        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
        priv->tx_ring_size = BCMENET_DEF_TX_DESC;

        pd = pdev->dev.platform_data;
        if (pd) {
                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
                priv->has_phy = pd->has_phy;
                priv->phy_id = pd->phy_id;
                priv->has_phy_interrupt = pd->has_phy_interrupt;
                priv->phy_interrupt = pd->phy_interrupt;
                priv->use_external_mii = !pd->use_internal_phy;
                priv->pause_auto = pd->pause_auto;
                priv->pause_rx = pd->pause_rx;
                priv->pause_tx = pd->pause_tx;
                priv->force_duplex_full = pd->force_duplex_full;
                priv->force_speed_100 = pd->force_speed_100;
        }
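
        /*
         * Illustrative only: the board code is expected to supply the
         * fields consumed above, roughly along these lines (all
         * values below are made up):
         *
         *      static struct bcm63xx_enet_platform_data enet0_pd = {
         *              .mac_addr         = { 0x00, 0x11, 0x22,
         *                                    0x33, 0x44, 0x55 },
         *              .has_phy          = 1,
         *              .phy_id           = 1,
         *              .use_internal_phy = 1,
         *              .pause_auto       = 1,
         *              .pause_rx         = 1,
         *              .pause_tx         = 1,
         *      };
         */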

        if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
                /* using internal PHY, enable clock */
                priv->phy_clk = clk_get(&pdev->dev, "ephy");
                if (IS_ERR(priv->phy_clk)) {
                        ret = PTR_ERR(priv->phy_clk);
                        priv->phy_clk = NULL;
                        goto out_put_clk_mac;
                }
                clk_enable(priv->phy_clk);
        }

        /* do minimal hardware init to be able to probe mii bus */
        bcm_enet_hw_preinit(priv);

        /* MII bus registration */
        if (priv->has_phy) {
                priv->mii_bus = mdiobus_alloc();
                if (!priv->mii_bus) {
                        ret = -ENOMEM;
                        goto out_uninit_hw;
                }

                bus = priv->mii_bus;
                bus->name = "bcm63xx_enet MII bus";
                bus->parent = &pdev->dev;
                bus->priv = priv;
                bus->read = bcm_enet_mdio_read_phylib;
                bus->write = bcm_enet_mdio_write_phylib;
                snprintf(bus->id, MII_BUS_ID_SIZE, "%d", priv->mac_id);

                /* only probe the address where we expect the PHY: the
                 * mdio read operation returns 0 instead of 0xffff when
                 * no slave is present on the hardware bus */
                bus->phy_mask = ~(1 << priv->phy_id);
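
                /*
                 * e.g. with phy_id == 1 the mask above becomes
                 * 0xfffffffd, so only mii address 1 is scanned
                 */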

                bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
                if (!bus->irq) {
                        ret = -ENOMEM;
                        goto out_free_mdio;
                }

                if (priv->has_phy_interrupt)
                        bus->irq[priv->phy_id] = priv->phy_interrupt;
                else
                        bus->irq[priv->phy_id] = PHY_POLL;

                ret = mdiobus_register(bus);
                if (ret) {
                        dev_err(&pdev->dev, "unable to register mdio bus\n");
                        goto out_free_mdio;
                }
        } else {
                /* run platform code to initialize the PHY device; pd
                 * may be NULL when no platform data was provided */
                if (pd && pd->mii_config &&
                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
                                   bcm_enet_mdio_write_mii)) {
                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
                        ret = -ENODEV;
                        goto out_uninit_hw;
                }
        }

        spin_lock_init(&priv->rx_lock);

        /* init rx timeout (used for oom) */
        init_timer(&priv->rx_timeout);
        priv->rx_timeout.function = bcm_enet_refill_rx_timer;
        priv->rx_timeout.data = (unsigned long)dev;

        /* init the mib update lock&work */
        mutex_init(&priv->mib_update_lock);
        INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

        /* zero mib counters */
        for (i = 0; i < ENET_MIB_REG_COUNT; i++)
                enet_writel(priv, 0, ENET_MIB_REG(i));

        /* register netdevice */
        dev->netdev_ops = &bcm_enet_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
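
        /*
         * the napi weight of 16 bounds how many packets a single poll
         * call may process before the softirq moves on to other devices
         */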

        SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
        SET_NETDEV_DEV(dev, &pdev->dev);

        ret = register_netdev(dev);
        if (ret)
                goto out_unregister_mdio;

        netif_carrier_off(dev);
        platform_set_drvdata(pdev, dev);
        priv->pdev = pdev;
        priv->net_dev = dev;

        return 0;

out_unregister_mdio:
        if (priv->mii_bus)
                mdiobus_unregister(priv->mii_bus);

out_free_mdio:
        if (priv->mii_bus) {
                /* bus->irq may be NULL here; kfree(NULL) is a no-op */
                kfree(priv->mii_bus->irq);
                mdiobus_free(priv->mii_bus);
        }

out_uninit_hw:
        /* turn off mdc clock */
        enet_writel(priv, 0, ENET_MIISC_REG);
        if (priv->phy_clk) {
                clk_disable(priv->phy_clk);
                clk_put(priv->phy_clk);
        }

out_put_clk_mac:
        clk_disable(priv->mac_clk);
        clk_put(priv->mac_clk);

out_unmap:
        iounmap(priv->base);

out_release_mem:
        release_mem_region(res_mem->start, iomem_size);
out:
        free_netdev(dev);
        return ret;
}

/*
 * remove callback: stops hardware and unregisters the netdevice
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        struct resource *res;

        /* stop netdevice */
        dev = platform_get_drvdata(pdev);
        priv = netdev_priv(dev);
        unregister_netdev(dev);

        /* turn off mdc clock */
        enet_writel(priv, 0, ENET_MIISC_REG);

        if (priv->has_phy) {
                mdiobus_unregister(priv->mii_bus);
                kfree(priv->mii_bus->irq);
                mdiobus_free(priv->mii_bus);
        } else {
                struct bcm63xx_enet_platform_data *pd;

                pd = pdev->dev.platform_data;
                if (pd && pd->mii_config)
                        pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
                                       bcm_enet_mdio_write_mii);
        }

        /* release device resources */
        iounmap(priv->base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        /* disable hw block clocks */
        if (priv->phy_clk) {
                clk_disable(priv->phy_clk);
                clk_put(priv->phy_clk);
        }
        clk_disable(priv->mac_clk);
        clk_put(priv->mac_clk);

        platform_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return 0;
}

struct platform_driver bcm63xx_enet_driver = {
        .probe  = bcm_enet_probe,
        .remove = __devexit_p(bcm_enet_remove),
        .driver = {
                .name   = "bcm63xx_enet",
                .owner  = THIS_MODULE,
        },
};

/*
 * reserve & remap memory space shared between all macs
 */
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
        struct resource *res;
        unsigned int iomem_size;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        iomem_size = resource_size(res);
        if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
                return -EBUSY;

        bcm_enet_shared_base = ioremap(res->start, iomem_size);
        if (!bcm_enet_shared_base) {
                release_mem_region(res->start, iomem_size);
                return -ENOMEM;
        }
        return 0;
}

static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
        struct resource *res;

        iounmap(bcm_enet_shared_base);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
        return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * dma register space, which must only be claimed and mapped once
 */
struct platform_driver bcm63xx_enet_shared_driver = {
        .probe  = bcm_enet_shared_probe,
        .remove = __devexit_p(bcm_enet_shared_remove),
        .driver = {
                .name   = "bcm63xx_enet_shared",
                .owner  = THIS_MODULE,
        },
};
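
/*
 * Note: the matching platform devices are expected to be provided by
 * the board code; since bcm_enet_init() below registers this driver
 * before the mac driver, the shared io region is mapped before any
 * mac probe runs.
 */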

/*
 * entry point; register the shared driver first so that
 * bcm_enet_shared_base is mapped before any mac device is probed
 */
static int __init bcm_enet_init(void)
{
        int ret;

        ret = platform_driver_register(&bcm63xx_enet_shared_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&bcm63xx_enet_driver);
        if (ret)
                platform_driver_unregister(&bcm63xx_enet_shared_driver);

        return ret;
}

static void __exit bcm_enet_exit(void)
{
        platform_driver_unregister(&bcm63xx_enet_driver);
        platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");