linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for BCM963xx builtin Ethernet mac
   4 *
   5 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
   6 */
   7#include <linux/init.h>
   8#include <linux/interrupt.h>
   9#include <linux/module.h>
  10#include <linux/clk.h>
  11#include <linux/etherdevice.h>
  12#include <linux/slab.h>
  13#include <linux/delay.h>
  14#include <linux/ethtool.h>
  15#include <linux/crc32.h>
  16#include <linux/err.h>
  17#include <linux/dma-mapping.h>
  18#include <linux/platform_device.h>
  19#include <linux/if_vlan.h>
  20
  21#include <bcm63xx_dev_enet.h>
  22#include "bcm63xx_enet.h"
  23
  24static char bcm_enet_driver_name[] = "bcm63xx_enet";
  25
  26static int copybreak __read_mostly = 128;
  27module_param(copybreak, int, 0);
  28MODULE_PARM_DESC(copybreak, "Receive copy threshold");
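/*
 * Frames shorter than copybreak are copied into a freshly allocated
 * small skb in bcm_enet_receive_queue() so the original rx buffer can
 * simply be rearmed for the DMA engine; larger frames are passed up
 * as-is and the rx buffer is replaced.  As an illustration (value is
 * arbitrary), loading with "copybreak=256" raises the threshold to
 * 256 bytes.
 */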
  29
  30/* io register memory shared between all devices */
  31static void __iomem *bcm_enet_shared_base[3];
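/*
 * As used by the accessor helpers below: index 0 holds the global DMA
 * configuration registers (enet_dma_readl/writel), index 1 the
 * per-channel DMA channel registers (enet_dmac_readl/writel), and
 * index 2 the per-channel DMA state RAM (enet_dmas_readl/writel).
 */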
  32
  33/*
  34 * io helpers to access mac registers
  35 */
  36static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
  37{
  38        return bcm_readl(priv->base + off);
  39}
  40
  41static inline void enet_writel(struct bcm_enet_priv *priv,
  42                               u32 val, u32 off)
  43{
  44        bcm_writel(val, priv->base + off);
  45}
  46
  47/*
  48 * io helpers to access switch registers
  49 */
  50static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
  51{
  52        return bcm_readl(priv->base + off);
  53}
  54
  55static inline void enetsw_writel(struct bcm_enet_priv *priv,
  56                                 u32 val, u32 off)
  57{
  58        bcm_writel(val, priv->base + off);
  59}
  60
  61static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
  62{
  63        return bcm_readw(priv->base + off);
  64}
  65
  66static inline void enetsw_writew(struct bcm_enet_priv *priv,
  67                                 u16 val, u32 off)
  68{
  69        bcm_writew(val, priv->base + off);
  70}
  71
  72static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
  73{
  74        return bcm_readb(priv->base + off);
  75}
  76
  77static inline void enetsw_writeb(struct bcm_enet_priv *priv,
  78                                 u8 val, u32 off)
  79{
  80        bcm_writeb(val, priv->base + off);
  81}
  82
  83
  84/* io helpers to access shared registers */
  85static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
  86{
  87        return bcm_readl(bcm_enet_shared_base[0] + off);
  88}
  89
  90static inline void enet_dma_writel(struct bcm_enet_priv *priv,
  91                                       u32 val, u32 off)
  92{
  93        bcm_writel(val, bcm_enet_shared_base[0] + off);
  94}
  95
  96static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
  97{
  98        return bcm_readl(bcm_enet_shared_base[1] +
  99                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
 100}
 101
 102static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
 103                                       u32 val, u32 off, int chan)
 104{
 105        bcm_writel(val, bcm_enet_shared_base[1] +
 106                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
 107}
 108
 109static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
 110{
 111        return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
 112}
 113
 114static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
 115                                       u32 val, u32 off, int chan)
 116{
 117        bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
 118}
 119
 120/*
 121 * write the given data into the mii register and wait for the transfer
 122 * to end, with a timeout (average measured transfer time is 25us)
 123 */
 124static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
 125{
 126        int limit;
 127
 128        /* make sure mii interrupt status is cleared */
 129        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
 130
 131        enet_writel(priv, data, ENET_MIIDATA_REG);
 132        wmb();
 133
 134        /* busy wait on mii interrupt bit, with timeout */
 135        limit = 1000;
 136        do {
 137                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
 138                        break;
 139                udelay(1);
 140        } while (limit-- > 0);
 141
 142        return (limit < 0) ? 1 : 0;
 143}
 144
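/*
 * The command word passed to do_mdio_op() is composed by the read and
 * write helpers below: the opcode mask, the phy address, the register
 * number, the turnaround bits and (for writes) the 16-bit data are
 * OR'ed together using the ENET_MIIDATA_* shifts and masks from
 * bcm63xx_enet.h.
 */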
 145/*
 146 * MII internal read callback
 147 */
 148static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
 149                              int regnum)
 150{
 151        u32 tmp, val;
 152
 153        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
 154        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
 155        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
 156        tmp |= ENET_MIIDATA_OP_READ_MASK;
 157
 158        if (do_mdio_op(priv, tmp))
 159                return -1;
 160
 161        val = enet_readl(priv, ENET_MIIDATA_REG);
 162        val &= 0xffff;
 163        return val;
 164}
 165
 166/*
 167 * MII internal write callback
 168 */
 169static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
 170                               int regnum, u16 value)
 171{
 172        u32 tmp;
 173
 174        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
 175        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
 176        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
 177        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
 178        tmp |= ENET_MIIDATA_OP_WRITE_MASK;
 179
 180        (void)do_mdio_op(priv, tmp);
 181        return 0;
 182}
 183
 184/*
 185 * MII read callback from phylib
 186 */
 187static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
 188                                     int regnum)
 189{
 190        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
 191}
 192
 193/*
 194 * MII write callback from phylib
 195 */
 196static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
 197                                      int regnum, u16 value)
 198{
 199        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
 200}
 201
 202/*
 203 * MII read callback from mii core
 204 */
 205static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
 206                                  int regnum)
 207{
 208        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
 209}
 210
 211/*
 212 * MII write callback from mii core
 213 */
 214static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
 215                                    int regnum, int value)
 216{
 217        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
 218}
 219
 220/*
 221 * refill rx queue
 222 */
 223static int bcm_enet_refill_rx(struct net_device *dev)
 224{
 225        struct bcm_enet_priv *priv;
 226
 227        priv = netdev_priv(dev);
 228
 229        while (priv->rx_desc_count < priv->rx_ring_size) {
 230                struct bcm_enet_desc *desc;
 231                struct sk_buff *skb;
 232                dma_addr_t p;
 233                int desc_idx;
 234                u32 len_stat;
 235
 236                desc_idx = priv->rx_dirty_desc;
 237                desc = &priv->rx_desc_cpu[desc_idx];
 238
 239                if (!priv->rx_skb[desc_idx]) {
 240                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
 241                        if (!skb)
 242                                break;
 243                        priv->rx_skb[desc_idx] = skb;
 244                        p = dma_map_single(&priv->pdev->dev, skb->data,
 245                                           priv->rx_skb_size,
 246                                           DMA_FROM_DEVICE);
 247                        desc->address = p;
 248                }
 249
 250                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 251                len_stat |= DMADESC_OWNER_MASK;
 252                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
 253                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 254                        priv->rx_dirty_desc = 0;
 255                } else {
 256                        priv->rx_dirty_desc++;
 257                }
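                /* make the buffer address visible to the hardware before
                 * len_stat hands ownership of the descriptor back to it;
                 * the wmb() below enforces that ordering */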
 258                wmb();
 259                desc->len_stat = len_stat;
 260
 261                priv->rx_desc_count++;
 262
 263                /* tell dma engine we allocated one buffer */
 264                if (priv->dma_has_sram)
 265                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
 266                else
 267                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
 268        }
 269
 270        /* If rx ring is still empty, set a timer to try allocating
 271         * again at a later time. */
 272        if (priv->rx_desc_count == 0 && netif_running(dev)) {
 273                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
 274                priv->rx_timeout.expires = jiffies + HZ;
 275                add_timer(&priv->rx_timeout);
 276        }
 277
 278        return 0;
 279}
 280
 281/*
 282 * timer callback to defer refill rx queue in case we're OOM
 283 */
 284static void bcm_enet_refill_rx_timer(struct timer_list *t)
 285{
 286        struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
 287        struct net_device *dev = priv->net_dev;
 288
 289        spin_lock(&priv->rx_lock);
 290        bcm_enet_refill_rx(dev);
 291        spin_unlock(&priv->rx_lock);
 292}
 293
 294/*
 295 * extract packet from rx queue
 296 */
 297static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 298{
 299        struct bcm_enet_priv *priv;
 300        struct device *kdev;
 301        int processed;
 302
 303        priv = netdev_priv(dev);
 304        kdev = &priv->pdev->dev;
 305        processed = 0;
 306
 307        /* don't scan ring further than number of refilled
 308         * descriptors */
 309        if (budget > priv->rx_desc_count)
 310                budget = priv->rx_desc_count;
 311
 312        do {
 313                struct bcm_enet_desc *desc;
 314                struct sk_buff *skb;
 315                int desc_idx;
 316                u32 len_stat;
 317                unsigned int len;
 318
 319                desc_idx = priv->rx_curr_desc;
 320                desc = &priv->rx_desc_cpu[desc_idx];
 321
 322                /* make sure we actually read the descriptor status at
 323                 * each loop */
 324                rmb();
 325
 326                len_stat = desc->len_stat;
 327
 328                /* break if dma ownership belongs to hw */
 329                if (len_stat & DMADESC_OWNER_MASK)
 330                        break;
 331
 332                processed++;
 333                priv->rx_curr_desc++;
 334                if (priv->rx_curr_desc == priv->rx_ring_size)
 335                        priv->rx_curr_desc = 0;
 336                priv->rx_desc_count--;
 337
 338                /* if the packet does not have start of packet _and_
 339                 * end of packet flag set, then just recycle it */
 340                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
 341                        (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
 342                        dev->stats.rx_dropped++;
 343                        continue;
 344                }
 345
 346                /* recycle packet if it's marked as bad */
 347                if (!priv->enet_is_sw &&
 348                    unlikely(len_stat & DMADESC_ERR_MASK)) {
 349                        dev->stats.rx_errors++;
 350
 351                        if (len_stat & DMADESC_OVSIZE_MASK)
 352                                dev->stats.rx_length_errors++;
 353                        if (len_stat & DMADESC_CRC_MASK)
 354                                dev->stats.rx_crc_errors++;
 355                        if (len_stat & DMADESC_UNDER_MASK)
 356                                dev->stats.rx_frame_errors++;
 357                        if (len_stat & DMADESC_OV_MASK)
 358                                dev->stats.rx_fifo_errors++;
 359                        continue;
 360                }
 361
 362                /* valid packet */
 363                skb = priv->rx_skb[desc_idx];
 364                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
 365                /* don't include FCS */
 366                len -= 4;
 367
 368                if (len < copybreak) {
 369                        struct sk_buff *nskb;
 370
 371                        nskb = napi_alloc_skb(&priv->napi, len);
 372                        if (!nskb) {
 373                                /* forget packet, just rearm desc */
 374                                dev->stats.rx_dropped++;
 375                                continue;
 376                        }
 377
 378                        dma_sync_single_for_cpu(kdev, desc->address,
 379                                                len, DMA_FROM_DEVICE);
 380                        memcpy(nskb->data, skb->data, len);
 381                        dma_sync_single_for_device(kdev, desc->address,
 382                                                   len, DMA_FROM_DEVICE);
 383                        skb = nskb;
 384                } else {
 385                        dma_unmap_single(&priv->pdev->dev, desc->address,
 386                                         priv->rx_skb_size, DMA_FROM_DEVICE);
 387                        priv->rx_skb[desc_idx] = NULL;
 388                }
 389
 390                skb_put(skb, len);
 391                skb->protocol = eth_type_trans(skb, dev);
 392                dev->stats.rx_packets++;
 393                dev->stats.rx_bytes += len;
 394                netif_receive_skb(skb);
 395
 396        } while (--budget > 0);
 397
 398        if (processed || !priv->rx_desc_count) {
 399                bcm_enet_refill_rx(dev);
 400
 401                /* kick rx dma */
 402                enet_dmac_writel(priv, priv->dma_chan_en_mask,
 403                                         ENETDMAC_CHANCFG, priv->rx_chan);
 404        }
 405
 406        return processed;
 407}
 408
 409
 410/*
 411 * try to or force reclaim of transmitted buffers
 412 */
 413static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
 414{
 415        struct bcm_enet_priv *priv;
 416        int released;
 417
 418        priv = netdev_priv(dev);
 419        released = 0;
 420
 421        while (priv->tx_desc_count < priv->tx_ring_size) {
 422                struct bcm_enet_desc *desc;
 423                struct sk_buff *skb;
 424
 425                /* We run in a bh and fight against start_xmit, which
 426                 * is called with bh disabled  */
 427                spin_lock(&priv->tx_lock);
 428
 429                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
 430
 431                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
 432                        spin_unlock(&priv->tx_lock);
 433                        break;
 434                }
 435
 436        /* ensure the other fields of the descriptor were not read
 437                 * before we checked ownership */
 438                rmb();
 439
 440                skb = priv->tx_skb[priv->tx_dirty_desc];
 441                priv->tx_skb[priv->tx_dirty_desc] = NULL;
 442                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
 443                                 DMA_TO_DEVICE);
 444
 445                priv->tx_dirty_desc++;
 446                if (priv->tx_dirty_desc == priv->tx_ring_size)
 447                        priv->tx_dirty_desc = 0;
 448                priv->tx_desc_count++;
 449
 450                spin_unlock(&priv->tx_lock);
 451
 452                if (desc->len_stat & DMADESC_UNDER_MASK)
 453                        dev->stats.tx_errors++;
 454
 455                dev_kfree_skb(skb);
 456                released++;
 457        }
 458
 459        if (netif_queue_stopped(dev) && released)
 460                netif_wake_queue(dev);
 461
 462        return released;
 463}
 464
 465/*
 466 * poll func, called by network core
 467 */
 468static int bcm_enet_poll(struct napi_struct *napi, int budget)
 469{
 470        struct bcm_enet_priv *priv;
 471        struct net_device *dev;
 472        int rx_work_done;
 473
 474        priv = container_of(napi, struct bcm_enet_priv, napi);
 475        dev = priv->net_dev;
 476
 477        /* ack interrupts */
 478        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 479                         ENETDMAC_IR, priv->rx_chan);
 480        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 481                         ENETDMAC_IR, priv->tx_chan);
 482
 483        /* reclaim sent skb */
 484        bcm_enet_tx_reclaim(dev, 0);
 485
 486        spin_lock(&priv->rx_lock);
 487        rx_work_done = bcm_enet_receive_queue(dev, budget);
 488        spin_unlock(&priv->rx_lock);
 489
 490        if (rx_work_done >= budget) {
 491                /* rx queue is not yet empty/clean */
 492                return rx_work_done;
 493        }
 494
 495        /* no more packets in rx/tx queue, remove device from poll
 496         * queue */
 497        napi_complete_done(napi, rx_work_done);
 498
 499        /* restore rx/tx interrupt */
 500        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 501                         ENETDMAC_IRMASK, priv->rx_chan);
 502        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 503                         ENETDMAC_IRMASK, priv->tx_chan);
 504
 505        return rx_work_done;
 506}
 507
 508/*
 509 * mac interrupt handler
 510 */
 511static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
 512{
 513        struct net_device *dev;
 514        struct bcm_enet_priv *priv;
 515        u32 stat;
 516
 517        dev = dev_id;
 518        priv = netdev_priv(dev);
 519
 520        stat = enet_readl(priv, ENET_IR_REG);
 521        if (!(stat & ENET_IR_MIB))
 522                return IRQ_NONE;
 523
 524        /* clear & mask interrupt */
 525        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 526        enet_writel(priv, 0, ENET_IRMASK_REG);
 527
 528        /* read mib registers in workqueue */
 529        schedule_work(&priv->mib_update_task);
 530
 531        return IRQ_HANDLED;
 532}
 533
 534/*
 535 * rx/tx dma interrupt handler
 536 */
 537static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
 538{
 539        struct net_device *dev;
 540        struct bcm_enet_priv *priv;
 541
 542        dev = dev_id;
 543        priv = netdev_priv(dev);
 544
 545        /* mask rx/tx interrupts */
 546        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
 547        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 548
 549        napi_schedule(&priv->napi);
 550
 551        return IRQ_HANDLED;
 552}
 553
 554/*
 555 * tx request callback
 556 */
 557static netdev_tx_t
 558bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 559{
 560        struct bcm_enet_priv *priv;
 561        struct bcm_enet_desc *desc;
 562        u32 len_stat;
 563        netdev_tx_t ret;
 564
 565        priv = netdev_priv(dev);
 566
 567        /* lock against tx reclaim */
 568        spin_lock(&priv->tx_lock);
 569
 570        /* make sure the tx hw queue is not full, this should not happen
 571         * since we stop the queue before that is the case */
 572        if (unlikely(!priv->tx_desc_count)) {
 573                netif_stop_queue(dev);
 574                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
 575                        "available?\n");
 576                ret = NETDEV_TX_BUSY;
 577                goto out_unlock;
 578        }
 579
 580        /* pad small packets sent on a switch device */
 581        if (priv->enet_is_sw && skb->len < 64) {
 582                int needed = 64 - skb->len;
 583                char *data;
 584
 585                if (unlikely(skb_tailroom(skb) < needed)) {
 586                        struct sk_buff *nskb;
 587
 588                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
 589                        if (!nskb) {
 590                                ret = NETDEV_TX_BUSY;
 591                                goto out_unlock;
 592                        }
 593                        dev_kfree_skb(skb);
 594                        skb = nskb;
 595                }
 596                data = skb_put_zero(skb, needed);
 597        }
 598
 599        /* point to the next available desc */
 600        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
 601        priv->tx_skb[priv->tx_curr_desc] = skb;
 602
 603        /* fill descriptor */
 604        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
 605                                       DMA_TO_DEVICE);
 606
 607        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
 608        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
 609                DMADESC_APPEND_CRC |
 610                DMADESC_OWNER_MASK;
 611
 612        priv->tx_curr_desc++;
 613        if (priv->tx_curr_desc == priv->tx_ring_size) {
 614                priv->tx_curr_desc = 0;
 615                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 616        }
 617        priv->tx_desc_count--;
 618
 619        /* dma might already be polling, make sure we update desc
 620         * fields in the correct order */
 621        wmb();
 622        desc->len_stat = len_stat;
 623        wmb();
 624
 625        /* kick tx dma */
 626        enet_dmac_writel(priv, priv->dma_chan_en_mask,
 627                                 ENETDMAC_CHANCFG, priv->tx_chan);
 628
 629        /* stop queue if no more desc available */
 630        if (!priv->tx_desc_count)
 631                netif_stop_queue(dev);
 632
 633        dev->stats.tx_bytes += skb->len;
 634        dev->stats.tx_packets++;
 635        ret = NETDEV_TX_OK;
 636
 637out_unlock:
 638        spin_unlock(&priv->tx_lock);
 639        return ret;
 640}
 641
 642/*
 643 * Change the interface's mac address.
 644 */
 645static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
 646{
 647        struct bcm_enet_priv *priv;
 648        struct sockaddr *addr = p;
 649        u32 val;
 650
 651        priv = netdev_priv(dev);
 652        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 653
 654        /* use perfect match register 0 to store my mac address */
 655        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
 656                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
 657        enet_writel(priv, val, ENET_PML_REG(0));
 658
 659        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 660        val |= ENET_PMH_DATAVALID_MASK;
 661        enet_writel(priv, val, ENET_PMH_REG(0));
 662
 663        return 0;
 664}
 665
 666/*
 667 * Change rx mode (promiscuous/allmulti) and update multicast list
 668 */
 669static void bcm_enet_set_multicast_list(struct net_device *dev)
 670{
 671        struct bcm_enet_priv *priv;
 672        struct netdev_hw_addr *ha;
 673        u32 val;
 674        int i;
 675
 676        priv = netdev_priv(dev);
 677
 678        val = enet_readl(priv, ENET_RXCFG_REG);
 679
 680        if (dev->flags & IFF_PROMISC)
 681                val |= ENET_RXCFG_PROMISC_MASK;
 682        else
 683                val &= ~ENET_RXCFG_PROMISC_MASK;
 684
 685        /* only 3 perfect match registers are left, the first one is
 686         * used for our own mac address */
 687        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
 688                val |= ENET_RXCFG_ALLMCAST_MASK;
 689        else
 690                val &= ~ENET_RXCFG_ALLMCAST_MASK;
 691
 692        /* no need to set perfect match registers if we catch all
 693         * multicast */
 694        if (val & ENET_RXCFG_ALLMCAST_MASK) {
 695                enet_writel(priv, val, ENET_RXCFG_REG);
 696                return;
 697        }
 698
 699        i = 0;
 700        netdev_for_each_mc_addr(ha, dev) {
 701                u8 *dmi_addr;
 702                u32 tmp;
 703
 704                if (i == 3)
 705                        break;
 706                /* update perfect match registers */
 707                dmi_addr = ha->addr;
 708                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
 709                        (dmi_addr[4] << 8) | dmi_addr[5];
 710                enet_writel(priv, tmp, ENET_PML_REG(i + 1));
 711
 712                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
 713                tmp |= ENET_PMH_DATAVALID_MASK;
 714                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
 715        }
 716
 717        for (; i < 3; i++) {
 718                enet_writel(priv, 0, ENET_PML_REG(i + 1));
 719                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
 720        }
 721
 722        enet_writel(priv, val, ENET_RXCFG_REG);
 723}
 724
 725/*
 726 * set mac duplex parameters
 727 */
 728static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
 729{
 730        u32 val;
 731
 732        val = enet_readl(priv, ENET_TXCTL_REG);
 733        if (fullduplex)
 734                val |= ENET_TXCTL_FD_MASK;
 735        else
 736                val &= ~ENET_TXCTL_FD_MASK;
 737        enet_writel(priv, val, ENET_TXCTL_REG);
 738}
 739
 740/*
 741 * set mac flow control parameters
 742 */
 743static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
 744{
 745        u32 val;
 746
 747        /* rx flow control (pause frame handling) */
 748        val = enet_readl(priv, ENET_RXCFG_REG);
 749        if (rx_en)
 750                val |= ENET_RXCFG_ENFLOW_MASK;
 751        else
 752                val &= ~ENET_RXCFG_ENFLOW_MASK;
 753        enet_writel(priv, val, ENET_RXCFG_REG);
 754
 755        if (!priv->dma_has_sram)
 756                return;
 757
 758        /* tx flow control (pause frame generation) */
 759        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 760        if (tx_en)
 761                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
 762        else
 763                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
 764        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
 765}
 766
 767/*
 768 * link changed callback (from phylib)
 769 */
 770static void bcm_enet_adjust_phy_link(struct net_device *dev)
 771{
 772        struct bcm_enet_priv *priv;
 773        struct phy_device *phydev;
 774        int status_changed;
 775
 776        priv = netdev_priv(dev);
 777        phydev = dev->phydev;
 778        status_changed = 0;
 779
 780        if (priv->old_link != phydev->link) {
 781                status_changed = 1;
 782                priv->old_link = phydev->link;
 783        }
 784
 785        /* reflect duplex change in mac configuration */
 786        if (phydev->link && phydev->duplex != priv->old_duplex) {
 787                bcm_enet_set_duplex(priv,
 788                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
 789                status_changed = 1;
 790                priv->old_duplex = phydev->duplex;
 791        }
 792
 793        /* enable flow control if the remote advertises it (trust phylib
 794         * to check that duplex is full) */
 795        if (phydev->link && phydev->pause != priv->old_pause) {
 796                int rx_pause_en, tx_pause_en;
 797
 798                if (phydev->pause) {
 799                        /* pause was advertised by lpa and us */
 800                        rx_pause_en = 1;
 801                        tx_pause_en = 1;
 802                } else if (!priv->pause_auto) {
 803                        /* pause setting overridden by user */
 804                        rx_pause_en = priv->pause_rx;
 805                        tx_pause_en = priv->pause_tx;
 806                } else {
 807                        rx_pause_en = 0;
 808                        tx_pause_en = 0;
 809                }
 810
 811                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
 812                status_changed = 1;
 813                priv->old_pause = phydev->pause;
 814        }
 815
 816        if (status_changed) {
 817                pr_info("%s: link %s", dev->name, phydev->link ?
 818                        "UP" : "DOWN");
 819                if (phydev->link)
 820                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
 821                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
 822                               phydev->pause == 1 ? "rx&tx" : "off");
 823
 824                pr_cont("\n");
 825        }
 826}
 827
 828/*
 829 * link changed callback (if phylib is not used)
 830 */
 831static void bcm_enet_adjust_link(struct net_device *dev)
 832{
 833        struct bcm_enet_priv *priv;
 834
 835        priv = netdev_priv(dev);
 836        bcm_enet_set_duplex(priv, priv->force_duplex_full);
 837        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
 838        netif_carrier_on(dev);
 839
 840        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
 841                dev->name,
 842                priv->force_speed_100 ? 100 : 10,
 843                priv->force_duplex_full ? "full" : "half",
 844                priv->pause_rx ? "rx" : "off",
 845                priv->pause_tx ? "tx" : "off");
 846}
 847
 848/*
 849 * open callback, allocate dma rings & buffers and start rx operation
 850 */
 851static int bcm_enet_open(struct net_device *dev)
 852{
 853        struct bcm_enet_priv *priv;
 854        struct sockaddr addr;
 855        struct device *kdev;
 856        struct phy_device *phydev;
 857        int i, ret;
 858        unsigned int size;
 859        char phy_id[MII_BUS_ID_SIZE + 3];
 860        void *p;
 861        u32 val;
 862
 863        priv = netdev_priv(dev);
 864        kdev = &priv->pdev->dev;
 865
 866        if (priv->has_phy) {
 867                /* connect to PHY */
 868                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
 869                         priv->mii_bus->id, priv->phy_id);
 870
 871                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
 872                                     PHY_INTERFACE_MODE_MII);
 873
 874                if (IS_ERR(phydev)) {
 875                        dev_err(kdev, "could not attach to PHY\n");
 876                        return PTR_ERR(phydev);
 877                }
 878
 879                /* mask with MAC supported features */
 880                phy_support_sym_pause(phydev);
 881                phy_set_max_speed(phydev, SPEED_100);
 882                phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
 883                                  priv->pause_auto);
 884
 885                phy_attached_info(phydev);
 886
 887                priv->old_link = 0;
 888                priv->old_duplex = -1;
 889                priv->old_pause = -1;
 890        } else {
 891                phydev = NULL;
 892        }
 893
 894        /* mask all interrupts and request them */
 895        enet_writel(priv, 0, ENET_IRMASK_REG);
 896        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
 897        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 898
 899        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 900        if (ret)
 901                goto out_phy_disconnect;
 902
 903        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
 904                          dev->name, dev);
 905        if (ret)
 906                goto out_freeirq;
 907
 908        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
 909                          0, dev->name, dev);
 910        if (ret)
 911                goto out_freeirq_rx;
 912
 913        /* initialize perfect match registers */
 914        for (i = 0; i < 4; i++) {
 915                enet_writel(priv, 0, ENET_PML_REG(i));
 916                enet_writel(priv, 0, ENET_PMH_REG(i));
 917        }
 918
 919        /* write device mac address */
 920        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
 921        bcm_enet_set_mac_address(dev, &addr);
 922
 923        /* allocate rx dma ring */
 924        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
 925        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 926        if (!p) {
 927                ret = -ENOMEM;
 928                goto out_freeirq_tx;
 929        }
 930
 931        priv->rx_desc_alloc_size = size;
 932        priv->rx_desc_cpu = p;
 933
 934        /* allocate tx dma ring */
 935        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
 936        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 937        if (!p) {
 938                ret = -ENOMEM;
 939                goto out_free_rx_ring;
 940        }
 941
 942        priv->tx_desc_alloc_size = size;
 943        priv->tx_desc_cpu = p;
 944
 945        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
 946                               GFP_KERNEL);
 947        if (!priv->tx_skb) {
 948                ret = -ENOMEM;
 949                goto out_free_tx_ring;
 950        }
 951
 952        priv->tx_desc_count = priv->tx_ring_size;
 953        priv->tx_dirty_desc = 0;
 954        priv->tx_curr_desc = 0;
 955        spin_lock_init(&priv->tx_lock);
 956
 957        /* init & fill rx ring with skbs */
 958        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
 959                               GFP_KERNEL);
 960        if (!priv->rx_skb) {
 961                ret = -ENOMEM;
 962                goto out_free_tx_skb;
 963        }
 964
 965        priv->rx_desc_count = 0;
 966        priv->rx_dirty_desc = 0;
 967        priv->rx_curr_desc = 0;
 968
 969        /* initialize flow control buffer allocation */
 970        if (priv->dma_has_sram)
 971                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 972                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
 973        else
 974                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 975                                ENETDMAC_BUFALLOC, priv->rx_chan);
 976
 977        if (bcm_enet_refill_rx(dev)) {
 978                dev_err(kdev, "cannot allocate rx skb queue\n");
 979                ret = -ENOMEM;
 980                goto out;
 981        }
 982
 983        /* write rx & tx ring addresses */
 984        if (priv->dma_has_sram) {
 985                enet_dmas_writel(priv, priv->rx_desc_dma,
 986                                 ENETDMAS_RSTART_REG, priv->rx_chan);
 987                enet_dmas_writel(priv, priv->tx_desc_dma,
 988                         ENETDMAS_RSTART_REG, priv->tx_chan);
 989        } else {
 990                enet_dmac_writel(priv, priv->rx_desc_dma,
 991                                ENETDMAC_RSTART, priv->rx_chan);
 992                enet_dmac_writel(priv, priv->tx_desc_dma,
 993                                ENETDMAC_RSTART, priv->tx_chan);
 994        }
 995
 996        /* clear remaining state ram for rx & tx channel */
 997        if (priv->dma_has_sram) {
 998                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
 999                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1000                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1001                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1002                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1003                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1004        } else {
1005                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1006                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1007        }
1008
1009        /* set max rx/tx length */
1010        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1011        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1012
1013        /* set dma maximum burst len */
1014        enet_dmac_writel(priv, priv->dma_maxburst,
1015                         ENETDMAC_MAXBURST, priv->rx_chan);
1016        enet_dmac_writel(priv, priv->dma_maxburst,
1017                         ENETDMAC_MAXBURST, priv->tx_chan);
1018
1019        /* set correct transmit fifo watermark */
1020        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1021
1022        /* set flow control low/high threshold to 1/3 / 2/3 */
1023        if (priv->dma_has_sram) {
1024                val = priv->rx_ring_size / 3;
1025                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1026                val = (priv->rx_ring_size * 2) / 3;
1027                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1028        } else {
1029                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1030                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1031                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1032        }
1033
1034        /* all set, enable mac and interrupts, start dma engine and
1035         * kick rx dma channel */
1036        wmb();
1037        val = enet_readl(priv, ENET_CTL_REG);
1038        val |= ENET_CTL_ENABLE_MASK;
1039        enet_writel(priv, val, ENET_CTL_REG);
1040        if (priv->dma_has_sram)
1041                enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1042        enet_dmac_writel(priv, priv->dma_chan_en_mask,
1043                         ENETDMAC_CHANCFG, priv->rx_chan);
1044
1045        /* watch "mib counters about to overflow" interrupt */
1046        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1047        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1048
1049        /* watch "packet transferred" interrupt in rx and tx */
1050        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1051                         ENETDMAC_IR, priv->rx_chan);
1052        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1053                         ENETDMAC_IR, priv->tx_chan);
1054
1055        /* make sure we enable napi before rx interrupt  */
1056        napi_enable(&priv->napi);
1057
1058        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1059                         ENETDMAC_IRMASK, priv->rx_chan);
1060        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1061                         ENETDMAC_IRMASK, priv->tx_chan);
1062
1063        if (phydev)
1064                phy_start(phydev);
1065        else
1066                bcm_enet_adjust_link(dev);
1067
1068        netif_start_queue(dev);
1069        return 0;
1070
1071out:
1072        for (i = 0; i < priv->rx_ring_size; i++) {
1073                struct bcm_enet_desc *desc;
1074
1075                if (!priv->rx_skb[i])
1076                        continue;
1077
1078                desc = &priv->rx_desc_cpu[i];
1079                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1080                                 DMA_FROM_DEVICE);
1081                kfree_skb(priv->rx_skb[i]);
1082        }
1083        kfree(priv->rx_skb);
1084
1085out_free_tx_skb:
1086        kfree(priv->tx_skb);
1087
1088out_free_tx_ring:
1089        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1090                          priv->tx_desc_cpu, priv->tx_desc_dma);
1091
1092out_free_rx_ring:
1093        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1094                          priv->rx_desc_cpu, priv->rx_desc_dma);
1095
1096out_freeirq_tx:
1097        free_irq(priv->irq_tx, dev);
1098
1099out_freeirq_rx:
1100        free_irq(priv->irq_rx, dev);
1101
1102out_freeirq:
1103        free_irq(dev->irq, dev);
1104
1105out_phy_disconnect:
1106        if (phydev)
1107                phy_disconnect(phydev);
1108
1109        return ret;
1110}
1111
1112/*
1113 * disable mac
1114 */
1115static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1116{
1117        int limit;
1118        u32 val;
1119
1120        val = enet_readl(priv, ENET_CTL_REG);
1121        val |= ENET_CTL_DISABLE_MASK;
1122        enet_writel(priv, val, ENET_CTL_REG);
1123
1124        limit = 1000;
1125        do {
1126                u32 val;
1127
1128                val = enet_readl(priv, ENET_CTL_REG);
1129                if (!(val & ENET_CTL_DISABLE_MASK))
1130                        break;
1131                udelay(1);
1132        } while (limit--);
1133}
1134
1135/*
1136 * disable dma in given channel
1137 */
1138static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1139{
1140        int limit;
1141
1142        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1143
1144        limit = 1000;
1145        do {
1146                u32 val;
1147
1148                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1149                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1150                        break;
1151                udelay(1);
1152        } while (limit--);
1153}
1154
1155/*
1156 * stop callback
1157 */
1158static int bcm_enet_stop(struct net_device *dev)
1159{
1160        struct bcm_enet_priv *priv;
1161        struct device *kdev;
1162        int i;
1163
1164        priv = netdev_priv(dev);
1165        kdev = &priv->pdev->dev;
1166
1167        netif_stop_queue(dev);
1168        napi_disable(&priv->napi);
1169        if (priv->has_phy)
1170                phy_stop(dev->phydev);
1171        del_timer_sync(&priv->rx_timeout);
1172
1173        /* mask all interrupts */
1174        enet_writel(priv, 0, ENET_IRMASK_REG);
1175        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1176        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1177
1178        /* make sure no mib update is scheduled */
1179        cancel_work_sync(&priv->mib_update_task);
1180
1181        /* disable dma & mac */
1182        bcm_enet_disable_dma(priv, priv->tx_chan);
1183        bcm_enet_disable_dma(priv, priv->rx_chan);
1184        bcm_enet_disable_mac(priv);
1185
1186        /* force reclaim of all tx buffers */
1187        bcm_enet_tx_reclaim(dev, 1);
1188
1189        /* free the rx skb ring */
1190        for (i = 0; i < priv->rx_ring_size; i++) {
1191                struct bcm_enet_desc *desc;
1192
1193                if (!priv->rx_skb[i])
1194                        continue;
1195
1196                desc = &priv->rx_desc_cpu[i];
1197                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1198                                 DMA_FROM_DEVICE);
1199                kfree_skb(priv->rx_skb[i]);
1200        }
1201
1202        /* free remaining allocated memory */
1203        kfree(priv->rx_skb);
1204        kfree(priv->tx_skb);
1205        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1206                          priv->rx_desc_cpu, priv->rx_desc_dma);
1207        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1208                          priv->tx_desc_cpu, priv->tx_desc_dma);
1209        free_irq(priv->irq_tx, dev);
1210        free_irq(priv->irq_rx, dev);
1211        free_irq(dev->irq, dev);
1212
1213        /* release phy */
1214        if (priv->has_phy)
1215                phy_disconnect(dev->phydev);
1216
1217        return 0;
1218}
1219
1220/*
1221 * ethtool callbacks
1222 */
1223struct bcm_enet_stats {
1224        char stat_string[ETH_GSTRING_LEN];
1225        int sizeof_stat;
1226        int stat_offset;
1227        int mib_reg;
1228};
1229
1230#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
1231                     offsetof(struct bcm_enet_priv, m)
1232#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
1233                     offsetof(struct net_device_stats, m)
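/*
 * GEN_STAT()/DEV_STAT() expand to the "sizeof_stat, stat_offset" pair of
 * struct bcm_enet_stats; entries in the table below with mib_reg == -1
 * are read from net_device stats instead of a hardware MIB register
 * (see update_mib_counters and bcm_enet_get_ethtool_stats).
 */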
1234
1235static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1236        { "rx_packets", DEV_STAT(rx_packets), -1 },
1237        { "tx_packets", DEV_STAT(tx_packets), -1 },
1238        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1239        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1240        { "rx_errors", DEV_STAT(rx_errors), -1 },
1241        { "tx_errors", DEV_STAT(tx_errors), -1 },
1242        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1243        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1244
1245        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1246        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1247        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1248        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1249        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1250        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1251        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1252        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1253        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1254        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1255        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1256        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1257        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1258        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1259        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1260        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1261        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1262        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1263        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1264        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1265        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1266
1267        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1268        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1269        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1270        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1271        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1272        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1273        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1274        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1275        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1276        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1277        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1278        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1279        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1280        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1281        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1282        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1283        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1284        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1285        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1286        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1287        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1288        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1289
1290};
1291
1292#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)
1293
1294static const u32 unused_mib_regs[] = {
1295        ETH_MIB_TX_ALL_OCTETS,
1296        ETH_MIB_TX_ALL_PKTS,
1297        ETH_MIB_RX_ALL_OCTETS,
1298        ETH_MIB_RX_ALL_PKTS,
1299};
1300
1301
1302static void bcm_enet_get_drvinfo(struct net_device *netdev,
1303                                 struct ethtool_drvinfo *drvinfo)
1304{
1305        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1306        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1307}
1308
1309static int bcm_enet_get_sset_count(struct net_device *netdev,
1310                                        int string_set)
1311{
1312        switch (string_set) {
1313        case ETH_SS_STATS:
1314                return BCM_ENET_STATS_LEN;
1315        default:
1316                return -EINVAL;
1317        }
1318}
1319
1320static void bcm_enet_get_strings(struct net_device *netdev,
1321                                 u32 stringset, u8 *data)
1322{
1323        int i;
1324
1325        switch (stringset) {
1326        case ETH_SS_STATS:
1327                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1328                        memcpy(data + i * ETH_GSTRING_LEN,
1329                               bcm_enet_gstrings_stats[i].stat_string,
1330                               ETH_GSTRING_LEN);
1331                }
1332                break;
1333        }
1334}
1335
1336static void update_mib_counters(struct bcm_enet_priv *priv)
1337{
1338        int i;
1339
1340        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1341                const struct bcm_enet_stats *s;
1342                u32 val;
1343                char *p;
1344
1345                s = &bcm_enet_gstrings_stats[i];
1346                if (s->mib_reg == -1)
1347                        continue;
1348
1349                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1350                p = (char *)priv + s->stat_offset;
1351
1352                if (s->sizeof_stat == sizeof(u64))
1353                        *(u64 *)p += val;
1354                else
1355                        *(u32 *)p += val;
1356        }
1357
1358        /* also empty unused mib counters to make sure mib counter
1359         * overflow interrupt is cleared */
1360        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1361                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1362}
1363
1364static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1365{
1366        struct bcm_enet_priv *priv;
1367
1368        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1369        mutex_lock(&priv->mib_update_lock);
1370        update_mib_counters(priv);
1371        mutex_unlock(&priv->mib_update_lock);
1372
1373        /* reenable mib interrupt */
1374        if (netif_running(priv->net_dev))
1375                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1376}
1377
1378static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1379                                       struct ethtool_stats *stats,
1380                                       u64 *data)
1381{
1382        struct bcm_enet_priv *priv;
1383        int i;
1384
1385        priv = netdev_priv(netdev);
1386
1387        mutex_lock(&priv->mib_update_lock);
1388        update_mib_counters(priv);
1389
1390        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1391                const struct bcm_enet_stats *s;
1392                char *p;
1393
1394                s = &bcm_enet_gstrings_stats[i];
1395                if (s->mib_reg == -1)
1396                        p = (char *)&netdev->stats;
1397                else
1398                        p = (char *)priv;
1399                p += s->stat_offset;
1400                data[i] = (s->sizeof_stat == sizeof(u64)) ?
1401                        *(u64 *)p : *(u32 *)p;
1402        }
1403        mutex_unlock(&priv->mib_update_lock);
1404}
1405
1406static int bcm_enet_nway_reset(struct net_device *dev)
1407{
1408        struct bcm_enet_priv *priv;
1409
1410        priv = netdev_priv(dev);
1411        if (priv->has_phy)
1412                return phy_ethtool_nway_reset(dev);
1413
1414        return -EOPNOTSUPP;
1415}
1416
1417static int bcm_enet_get_link_ksettings(struct net_device *dev,
1418                                       struct ethtool_link_ksettings *cmd)
1419{
1420        struct bcm_enet_priv *priv;
1421        u32 supported, advertising;
1422
1423        priv = netdev_priv(dev);
1424
1425        if (priv->has_phy) {
1426                if (!dev->phydev)
1427                        return -ENODEV;
1428
1429                phy_ethtool_ksettings_get(dev->phydev, cmd);
1430
1431                return 0;
1432        } else {
1433                cmd->base.autoneg = 0;
1434                cmd->base.speed = (priv->force_speed_100) ?
1435                        SPEED_100 : SPEED_10;
1436                cmd->base.duplex = (priv->force_duplex_full) ?
1437                        DUPLEX_FULL : DUPLEX_HALF;
1438                supported = ADVERTISED_10baseT_Half |
1439                        ADVERTISED_10baseT_Full |
1440                        ADVERTISED_100baseT_Half |
1441                        ADVERTISED_100baseT_Full;
1442                advertising = 0;
1443                ethtool_convert_legacy_u32_to_link_mode(
1444                        cmd->link_modes.supported, supported);
1445                ethtool_convert_legacy_u32_to_link_mode(
1446                        cmd->link_modes.advertising, advertising);
1447                cmd->base.port = PORT_MII;
1448        }
1449        return 0;
1450}
1451
1452static int bcm_enet_set_link_ksettings(struct net_device *dev,
1453                                       const struct ethtool_link_ksettings *cmd)
1454{
1455        struct bcm_enet_priv *priv;
1456
1457        priv = netdev_priv(dev);
1458        if (priv->has_phy) {
1459                if (!dev->phydev)
1460                        return -ENODEV;
1461                return phy_ethtool_ksettings_set(dev->phydev, cmd);
1462        } else {
1463
1464                if (cmd->base.autoneg ||
1465                    (cmd->base.speed != SPEED_100 &&
1466                     cmd->base.speed != SPEED_10) ||
1467                    cmd->base.port != PORT_MII)
1468                        return -EINVAL;
1469
1470                priv->force_speed_100 =
1471                        (cmd->base.speed == SPEED_100) ? 1 : 0;
1472                priv->force_duplex_full =
1473                        (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
1474
1475                if (netif_running(dev))
1476                        bcm_enet_adjust_link(dev);
1477                return 0;
1478        }
1479}
1480
1481static void bcm_enet_get_ringparam(struct net_device *dev,
1482                                   struct ethtool_ringparam *ering)
1483{
1484        struct bcm_enet_priv *priv;
1485
1486        priv = netdev_priv(dev);
1487
1488        /* rx/tx ring is actually only limited by memory */
1489        ering->rx_max_pending = 8192;
1490        ering->tx_max_pending = 8192;
1491        ering->rx_pending = priv->rx_ring_size;
1492        ering->tx_pending = priv->tx_ring_size;
1493}
1494
1495static int bcm_enet_set_ringparam(struct net_device *dev,
1496                                  struct ethtool_ringparam *ering)
1497{
1498        struct bcm_enet_priv *priv;
1499        int was_running;
1500
1501        priv = netdev_priv(dev);
1502
1503        was_running = 0;
1504        if (netif_running(dev)) {
1505                bcm_enet_stop(dev);
1506                was_running = 1;
1507        }
1508
1509        priv->rx_ring_size = ering->rx_pending;
1510        priv->tx_ring_size = ering->tx_pending;
1511
1512        if (was_running) {
1513                int err;
1514
1515                err = bcm_enet_open(dev);
1516                if (err)
1517                        dev_close(dev);
1518                else
1519                        bcm_enet_set_multicast_list(dev);
1520        }
1521        return 0;
1522}
1523
1524static void bcm_enet_get_pauseparam(struct net_device *dev,
1525                                    struct ethtool_pauseparam *ecmd)
1526{
1527        struct bcm_enet_priv *priv;
1528
1529        priv = netdev_priv(dev);
1530        ecmd->autoneg = priv->pause_auto;
1531        ecmd->rx_pause = priv->pause_rx;
1532        ecmd->tx_pause = priv->pause_tx;
1533}
1534
1535static int bcm_enet_set_pauseparam(struct net_device *dev,
1536                                   struct ethtool_pauseparam *ecmd)
1537{
1538        struct bcm_enet_priv *priv;
1539
1540        priv = netdev_priv(dev);
1541
1542        if (priv->has_phy) {
1543                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1544                        /* asymmetric pause mode not supported,
1545                         * actually possible but integrated PHY has RO
1546                         * asym_pause bit */
1547                        return -EINVAL;
1548                }
1549        } else {
1550                /* no pause autoneg on direct mii connection */
1551                if (ecmd->autoneg)
1552                        return -EINVAL;
1553        }
1554
1555        priv->pause_auto = ecmd->autoneg;
1556        priv->pause_rx = ecmd->rx_pause;
1557        priv->pause_tx = ecmd->tx_pause;
1558
1559        return 0;
1560}
1561
1562static const struct ethtool_ops bcm_enet_ethtool_ops = {
1563        .get_strings            = bcm_enet_get_strings,
1564        .get_sset_count         = bcm_enet_get_sset_count,
1565        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1566        .nway_reset             = bcm_enet_nway_reset,
1567        .get_drvinfo            = bcm_enet_get_drvinfo,
1568        .get_link               = ethtool_op_get_link,
1569        .get_ringparam          = bcm_enet_get_ringparam,
1570        .set_ringparam          = bcm_enet_set_ringparam,
1571        .get_pauseparam         = bcm_enet_get_pauseparam,
1572        .set_pauseparam         = bcm_enet_set_pauseparam,
1573        .get_link_ksettings     = bcm_enet_get_link_ksettings,
1574        .set_link_ksettings     = bcm_enet_set_link_ksettings,
1575};
1576
1577static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1578{
1579        struct bcm_enet_priv *priv;
1580
1581        priv = netdev_priv(dev);
1582        if (priv->has_phy) {
1583                if (!dev->phydev)
1584                        return -ENODEV;
1585                return phy_mii_ioctl(dev->phydev, rq, cmd);
1586        } else {
1587                struct mii_if_info mii;
1588
1589                mii.dev = dev;
1590                mii.mdio_read = bcm_enet_mdio_read_mii;
1591                mii.mdio_write = bcm_enet_mdio_write_mii;
1592                mii.phy_id = 0;
1593                mii.phy_id_mask = 0x3f;
1594                mii.reg_num_mask = 0x1f;
1595                return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1596        }
1597}
1598
1599/*
1600 * adjust mtu, can't be called while device is running
1601 */
1602static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1603{
1604        struct bcm_enet_priv *priv = netdev_priv(dev);
1605        int actual_mtu = new_mtu;
1606
1607        if (netif_running(dev))
1608                return -EBUSY;
1609
1610        /* add ethernet header + vlan tag size */
1611        actual_mtu += VLAN_ETH_HLEN;
1612
1613        /*
1614         * set up the maximum size before we get the overflow mark in
1615         * the descriptor; note that this will not prevent reception of
1616         * big frames, they will be split into multiple buffers
1617         * anyway
1618         */
1619        priv->hw_mtu = actual_mtu;
1620
1621        /*
1622         * align rx buffer size to dma burst len, and account for the
1623         * FCS since it is appended
1624         */
1625        priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1626                                  priv->dma_maxburst * 4);
1627
1628        dev->mtu = new_mtu;
1629        return 0;
1630}
1631
1632/*
1633 * preinit hardware to allow mii operation while device is down
1634 */
1635static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1636{
1637        u32 val;
1638        int limit;
1639
1640        /* make sure mac is disabled */
1641        bcm_enet_disable_mac(priv);
1642
1643        /* soft reset mac */
1644        val = ENET_CTL_SRESET_MASK;
1645        enet_writel(priv, val, ENET_CTL_REG);
1646        wmb();
1647
1648        limit = 1000;
1649        do {
1650                val = enet_readl(priv, ENET_CTL_REG);
1651                if (!(val & ENET_CTL_SRESET_MASK))
1652                        break;
1653                udelay(1);
1654        } while (limit--);
1655
1656        /* select correct mii interface */
1657        val = enet_readl(priv, ENET_CTL_REG);
1658        if (priv->use_external_mii)
1659                val |= ENET_CTL_EPHYSEL_MASK;
1660        else
1661                val &= ~ENET_CTL_EPHYSEL_MASK;
1662        enet_writel(priv, val, ENET_CTL_REG);
1663
1664        /* turn on mdc clock */
1665        enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1666                    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1667
1668        /* set mib counters to self-clear when read */
1669        val = enet_readl(priv, ENET_MIBCTL_REG);
1670        val |= ENET_MIBCTL_RDCLEAR_MASK;
1671        enet_writel(priv, val, ENET_MIBCTL_REG);
1672}
1673
1674static const struct net_device_ops bcm_enet_ops = {
1675        .ndo_open               = bcm_enet_open,
1676        .ndo_stop               = bcm_enet_stop,
1677        .ndo_start_xmit         = bcm_enet_start_xmit,
1678        .ndo_set_mac_address    = bcm_enet_set_mac_address,
1679        .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
1680        .ndo_do_ioctl           = bcm_enet_ioctl,
1681        .ndo_change_mtu         = bcm_enet_change_mtu,
1682};
1683
1684/*
1685 * allocate netdevice, request register memory and register device.
1686 */
1687static int bcm_enet_probe(struct platform_device *pdev)
1688{
1689        struct bcm_enet_priv *priv;
1690        struct net_device *dev;
1691        struct bcm63xx_enet_platform_data *pd;
1692        struct resource *res_irq, *res_irq_rx, *res_irq_tx;
1693        struct mii_bus *bus;
1694        int i, ret;
1695
1696        if (!bcm_enet_shared_base[0])
1697                return -EPROBE_DEFER;
1698
1699        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1700        res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1701        res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1702        if (!res_irq || !res_irq_rx || !res_irq_tx)
1703                return -ENODEV;
1704
1705        dev = alloc_etherdev(sizeof(*priv));
1706        if (!dev)
1707                return -ENOMEM;
1708        priv = netdev_priv(dev);
1709
1710        priv->enet_is_sw = false;
1711        priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1712
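            /* derive hw_mtu and the rx buffer size from the default MTU
             * before any rx/tx ring is allocated */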
1713        ret = bcm_enet_change_mtu(dev, dev->mtu);
1714        if (ret)
1715                goto out;
1716
1717        priv->base = devm_platform_ioremap_resource(pdev, 0);
1718        if (IS_ERR(priv->base)) {
1719                ret = PTR_ERR(priv->base);
1720                goto out;
1721        }
1722
1723        dev->irq = priv->irq = res_irq->start;
1724        priv->irq_rx = res_irq_rx->start;
1725        priv->irq_tx = res_irq_tx->start;
1726
1727        priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1728        if (IS_ERR(priv->mac_clk)) {
1729                ret = PTR_ERR(priv->mac_clk);
1730                goto out;
1731        }
1732        ret = clk_prepare_enable(priv->mac_clk);
1733        if (ret)
1734                goto out;
1735
1736        /* initialize defaults and fetch platform data */
1737        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1738        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1739
1740        pd = dev_get_platdata(&pdev->dev);
1741        if (pd) {
1742                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1743                priv->has_phy = pd->has_phy;
1744                priv->phy_id = pd->phy_id;
1745                priv->has_phy_interrupt = pd->has_phy_interrupt;
1746                priv->phy_interrupt = pd->phy_interrupt;
1747                priv->use_external_mii = !pd->use_internal_phy;
1748                priv->pause_auto = pd->pause_auto;
1749                priv->pause_rx = pd->pause_rx;
1750                priv->pause_tx = pd->pause_tx;
1751                priv->force_duplex_full = pd->force_duplex_full;
1752                priv->force_speed_100 = pd->force_speed_100;
1753                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1754                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1755                priv->dma_chan_width = pd->dma_chan_width;
1756                priv->dma_has_sram = pd->dma_has_sram;
1757                priv->dma_desc_shift = pd->dma_desc_shift;
1758                priv->rx_chan = pd->rx_chan;
1759                priv->tx_chan = pd->tx_chan;
1760        }
1761
1762        if (priv->has_phy && !priv->use_external_mii) {
1763                /* using internal PHY, enable clock */
1764                priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1765                if (IS_ERR(priv->phy_clk)) {
1766                        ret = PTR_ERR(priv->phy_clk);
1767                        priv->phy_clk = NULL;
1768                        goto out_disable_clk_mac;
1769                }
1770                ret = clk_prepare_enable(priv->phy_clk);
1771                if (ret)
1772                        goto out_disable_clk_mac;
1773        }
1774
1775        /* do minimal hardware init to be able to probe mii bus */
1776        bcm_enet_hw_preinit(priv);
1777
1778        /* MII bus registration */
1779        if (priv->has_phy) {
1780
1781                priv->mii_bus = mdiobus_alloc();
1782                if (!priv->mii_bus) {
1783                        ret = -ENOMEM;
1784                        goto out_uninit_hw;
1785                }
1786
1787                bus = priv->mii_bus;
1788                bus->name = "bcm63xx_enet MII bus";
1789                bus->parent = &pdev->dev;
1790                bus->priv = priv;
1791                bus->read = bcm_enet_mdio_read_phylib;
1792                bus->write = bcm_enet_mdio_write_phylib;
1793                sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1794
1795                /* only probe the bus where we think the PHY is, because
1796                 * the mdio read operation returns 0 instead of 0xffff
1797                 * if a slave is not present on the hw */
1798                bus->phy_mask = ~(1 << priv->phy_id);
1799
1800                if (priv->has_phy_interrupt)
1801                        bus->irq[priv->phy_id] = priv->phy_interrupt;
1802
1803                ret = mdiobus_register(bus);
1804                if (ret) {
1805                        dev_err(&pdev->dev, "unable to register mdio bus\n");
1806                        goto out_free_mdio;
1807                }
1808        } else {
1809
1810                /* run platform code to initialize PHY device */
1811                if (pd && pd->mii_config &&
1812                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1813                                   bcm_enet_mdio_write_mii)) {
1814                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
1815                        goto out_uninit_hw;
1816                }
1817        }
1818
1819        spin_lock_init(&priv->rx_lock);
1820
1821        /* init rx timeout (used for oom) */
1822        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1823
1824        /* init the mib update lock&work */
1825        mutex_init(&priv->mib_update_lock);
1826        INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1827
1828        /* zero mib counters */
1829        for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1830                enet_writel(priv, 0, ENET_MIB_REG(i));
1831
1832        /* register netdevice */
1833        dev->netdev_ops = &bcm_enet_ops;
1834        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1835
1836        dev->ethtool_ops = &bcm_enet_ethtool_ops;
1837        /* MTU range: 46 - 2028 */
1838        dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1839        dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1840        SET_NETDEV_DEV(dev, &pdev->dev);
1841
1842        ret = register_netdev(dev);
1843        if (ret)
1844                goto out_unregister_mdio;
1845
1846        netif_carrier_off(dev);
1847        platform_set_drvdata(pdev, dev);
1848        priv->pdev = pdev;
1849        priv->net_dev = dev;
1850
1851        return 0;
1852
1853out_unregister_mdio:
1854        if (priv->mii_bus)
1855                mdiobus_unregister(priv->mii_bus);
1856
1857out_free_mdio:
1858        if (priv->mii_bus)
1859                mdiobus_free(priv->mii_bus);
1860
1861out_uninit_hw:
1862        /* turn off mdc clock */
1863        enet_writel(priv, 0, ENET_MIISC_REG);
1864        clk_disable_unprepare(priv->phy_clk);
1865
1866out_disable_clk_mac:
1867        clk_disable_unprepare(priv->mac_clk);
1868out:
1869        free_netdev(dev);
1870        return ret;
1871}
1872
1873
1874/*
1875 * exit func, stops hardware and unregisters netdevice
1876 */
1877static int bcm_enet_remove(struct platform_device *pdev)
1878{
1879        struct bcm_enet_priv *priv;
1880        struct net_device *dev;
1881
1882        /* stop netdevice */
1883        dev = platform_get_drvdata(pdev);
1884        priv = netdev_priv(dev);
1885        unregister_netdev(dev);
1886
1887        /* turn off mdc clock */
1888        enet_writel(priv, 0, ENET_MIISC_REG);
1889
1890        if (priv->has_phy) {
1891                mdiobus_unregister(priv->mii_bus);
1892                mdiobus_free(priv->mii_bus);
1893        } else {
1894                struct bcm63xx_enet_platform_data *pd;
1895
1896                pd = dev_get_platdata(&pdev->dev);
1897                if (pd && pd->mii_config)
1898                        pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1899                                       bcm_enet_mdio_write_mii);
1900        }
1901
1902        /* disable hw block clocks */
1903        clk_disable_unprepare(priv->phy_clk);
1904        clk_disable_unprepare(priv->mac_clk);
1905
1906        free_netdev(dev);
1907        return 0;
1908}
1909
1910struct platform_driver bcm63xx_enet_driver = {
1911        .probe  = bcm_enet_probe,
1912        .remove = bcm_enet_remove,
1913        .driver = {
1914                .name   = "bcm63xx_enet",
1915                .owner  = THIS_MODULE,
1916        },
1917};
1918
1919/*
1920 * switch mii access callbacks
1921 */
1922static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1923                                int ext, int phy_id, int location)
1924{
1925        u32 reg;
1926        int ret;
1927
1928        spin_lock_bh(&priv->enetsw_mdio_lock);
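            /* clear the MDIO command register before programming the read */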
1929        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1930
1931        reg = ENETSW_MDIOC_RD_MASK |
1932                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1933                (location << ENETSW_MDIOC_REG_SHIFT);
1934
1935        if (ext)
1936                reg |= ENETSW_MDIOC_EXT_MASK;
1937
1938        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
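            /* no completion bit is polled; a fixed delay covers the
             * MDIO transaction */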
1939        udelay(50);
1940        ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1941        spin_unlock_bh(&priv->enetsw_mdio_lock);
1942        return ret;
1943}
1944
1945static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1946                                 int ext, int phy_id, int location,
1947                                 uint16_t data)
1948{
1949        u32 reg;
1950
1951        spin_lock_bh(&priv->enetsw_mdio_lock);
1952        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1953
1954        reg = ENETSW_MDIOC_WR_MASK |
1955                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1956                (location << ENETSW_MDIOC_REG_SHIFT);
1957
1958        if (ext)
1959                reg |= ENETSW_MDIOC_EXT_MASK;
1960
1961        reg |= data;
1962
1963        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1964        udelay(50);
1965        spin_unlock_bh(&priv->enetsw_mdio_lock);
1966}
1967
1968static inline int bcm_enet_port_is_rgmii(int portid)
1969{
1970        return portid >= ENETSW_RGMII_PORT0;
1971}
1972
1973/*
1974 * enet sw PHY polling
1975 */
1976static void swphy_poll_timer(struct timer_list *t)
1977{
1978        struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
1979        unsigned int i;
1980
1981        for (i = 0; i < priv->num_ports; i++) {
1982                struct bcm63xx_enetsw_port *port;
1983                int val, j, up, advertise, lpa, speed, duplex, media;
1984                int external_phy = bcm_enet_port_is_rgmii(i);
1985                u8 override;
1986
1987                port = &priv->used_ports[i];
1988                if (!port->used)
1989                        continue;
1990
1991                if (port->bypass_link)
1992                        continue;
1993
1994                /* read twice: link status is latched, first read clears it */
1995                for (j = 0; j < 2; j++)
1996                        val = bcmenet_sw_mdio_read(priv, external_phy,
1997                                                   port->phy_id, MII_BMSR);
1998
1999                if (val == 0xffff)
2000                        continue;
2001
2002                up = (val & BMSR_LSTATUS) ? 1 : 0;
2003                if (!(up ^ priv->sw_port_link[i]))
2004                        continue;
2005
2006                priv->sw_port_link[i] = up;
2007
2008                /* link changed */
2009                if (!up) {
2010                        dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2011                                 port->name);
2012                        enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2013                                      ENETSW_PORTOV_REG(i));
2014                        enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2015                                      ENETSW_PTCTRL_TXDIS_MASK,
2016                                      ENETSW_PTCTRL_REG(i));
2017                        continue;
2018                }
2019
2020                advertise = bcmenet_sw_mdio_read(priv, external_phy,
2021                                                 port->phy_id, MII_ADVERTISE);
2022
2023                lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2024                                           MII_LPA);
2025
2026                /* figure out media and duplex from advertise and LPA values */
2027                media = mii_nway_result(lpa & advertise);
2028                duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2029
2030                if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2031                        speed = 100;
2032                else
2033                        speed = 10;
2034
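                    /* PHY reports extended status: also check 1000 Mbps
                     * ability on both sides of the link */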
2035                if (val & BMSR_ESTATEN) {
2036                        advertise = bcmenet_sw_mdio_read(priv, external_phy,
2037                                                port->phy_id, MII_CTRL1000);
2038
2039                        lpa = bcmenet_sw_mdio_read(priv, external_phy,
2040                                                port->phy_id, MII_STAT1000);
2041
2042                        if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) &&
2043                            lpa & (LPA_1000FULL | LPA_1000HALF)) {
2044                                speed = 1000;
2045                                duplex = (lpa & LPA_1000FULL);
2046                        }
2047                }
2048
2049                dev_info(&priv->pdev->dev,
2050                         "link UP on %s, %dMbps, %s-duplex\n",
2051                         port->name, speed, duplex ? "full" : "half");
2052
2053                override = ENETSW_PORTOV_ENABLE_MASK |
2054                        ENETSW_PORTOV_LINKUP_MASK;
2055
2056                if (speed == 1000)
2057                        override |= ENETSW_IMPOV_1000_MASK;
2058                else if (speed == 100)
2059                        override |= ENETSW_IMPOV_100_MASK;
2060                if (duplex)
2061                        override |= ENETSW_IMPOV_FDX_MASK;
2062
2063                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2064                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2065        }
2066
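            /* re-arm the timer: poll the switch PHYs again in one second */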
2067        priv->swphy_poll.expires = jiffies + HZ;
2068        add_timer(&priv->swphy_poll);
2069}
2070
2071/*
2072 * open callback, allocate dma rings & buffers and start rx operation
2073 */
2074static int bcm_enetsw_open(struct net_device *dev)
2075{
2076        struct bcm_enet_priv *priv;
2077        struct device *kdev;
2078        int i, ret;
2079        unsigned int size;
2080        void *p;
2081        u32 val;
2082
2083        priv = netdev_priv(dev);
2084        kdev = &priv->pdev->dev;
2085
2086        /* mask all interrupts and request them */
2087        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2088        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2089
2090        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2091                          0, dev->name, dev);
2092        if (ret)
2093                goto out_freeirq;
2094
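            /* some configurations have no dedicated tx interrupt */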
2095        if (priv->irq_tx != -1) {
2096                ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2097                                  0, dev->name, dev);
2098                if (ret)
2099                        goto out_freeirq_rx;
2100        }
2101
2102        /* allocate rx dma ring */
2103        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2104        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2105        if (!p) {
2106                dev_err(kdev, "cannot allocate rx ring %u\n", size);
2107                ret = -ENOMEM;
2108                goto out_freeirq_tx;
2109        }
2110
2111        priv->rx_desc_alloc_size = size;
2112        priv->rx_desc_cpu = p;
2113
2114        /* allocate tx dma ring */
2115        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2116        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2117        if (!p) {
2118                dev_err(kdev, "cannot allocate tx ring\n");
2119                ret = -ENOMEM;
2120                goto out_free_rx_ring;
2121        }
2122
2123        priv->tx_desc_alloc_size = size;
2124        priv->tx_desc_cpu = p;
2125
2126        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2127                               GFP_KERNEL);
2128        if (!priv->tx_skb) {
2129                dev_err(kdev, "cannot allocate tx skb queue\n");
2130                ret = -ENOMEM;
2131                goto out_free_tx_ring;
2132        }
2133
2134        priv->tx_desc_count = priv->tx_ring_size;
2135        priv->tx_dirty_desc = 0;
2136        priv->tx_curr_desc = 0;
2137        spin_lock_init(&priv->tx_lock);
2138
2139        /* init & fill rx ring with skbs */
2140        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
2141                               GFP_KERNEL);
2142        if (!priv->rx_skb) {
2143                dev_err(kdev, "cannot allocate rx skb queue\n");
2144                ret = -ENOMEM;
2145                goto out_free_tx_skb;
2146        }
2147
2148        priv->rx_desc_count = 0;
2149        priv->rx_dirty_desc = 0;
2150        priv->rx_curr_desc = 0;
2151
2152        /* disable all ports */
2153        for (i = 0; i < priv->num_ports; i++) {
2154                enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2155                              ENETSW_PORTOV_REG(i));
2156                enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2157                              ENETSW_PTCTRL_TXDIS_MASK,
2158                              ENETSW_PTCTRL_REG(i));
2159
2160                priv->sw_port_link[i] = 0;
2161        }
2162
2163        /* reset mib */
2164        val = enetsw_readb(priv, ENETSW_GMCR_REG);
2165        val |= ENETSW_GMCR_RST_MIB_MASK;
2166        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2167        mdelay(1);
2168        val &= ~ENETSW_GMCR_RST_MIB_MASK;
2169        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2170        mdelay(1);
2171
2172        /* force CPU port state */
2173        val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2174        val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2175        enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2176
2177        /* enable switch forward engine */
2178        val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2179        val |= ENETSW_SWMODE_FWD_EN_MASK;
2180        enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2181
2182        /* enable jumbo on all ports */
2183        enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2184        enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2185
2186        /* initialize flow control buffer allocation */
2187        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2188                        ENETDMA_BUFALLOC_REG(priv->rx_chan));
2189
2190        if (bcm_enet_refill_rx(dev)) {
2191                dev_err(kdev, "cannot allocate rx skb queue\n");
2192                ret = -ENOMEM;
2193                goto out;
2194        }
2195
2196        /* write rx & tx ring addresses */
2197        enet_dmas_writel(priv, priv->rx_desc_dma,
2198                         ENETDMAS_RSTART_REG, priv->rx_chan);
2199        enet_dmas_writel(priv, priv->tx_desc_dma,
2200                         ENETDMAS_RSTART_REG, priv->tx_chan);
2201
2202        /* clear remaining state ram for rx & tx channel */
2203        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2204        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2205        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2206        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2207        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2208        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2209
2210        /* set dma maximum burst len */
2211        enet_dmac_writel(priv, priv->dma_maxburst,
2212                         ENETDMAC_MAXBURST, priv->rx_chan);
2213        enet_dmac_writel(priv, priv->dma_maxburst,
2214                         ENETDMAC_MAXBURST, priv->tx_chan);
2215
2216        /* set flow control low/high threshold to 1/3 / 2/3 */
2217        val = priv->rx_ring_size / 3;
2218        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2219        val = (priv->rx_ring_size * 2) / 3;
2220        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2221
2222        /* all set, enable mac and interrupts, start dma engine and
2223         * kick rx dma channel
2224         */
2225        wmb();
2226        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2227        enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2228                         ENETDMAC_CHANCFG, priv->rx_chan);
2229
2230        /* watch "packet transferred" interrupt in rx and tx */
2231        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2232                         ENETDMAC_IR, priv->rx_chan);
2233        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2234                         ENETDMAC_IR, priv->tx_chan);
2235
2236        /* make sure we enable napi before rx interrupt  */
2237        napi_enable(&priv->napi);
2238
2239        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2240                         ENETDMAC_IRMASK, priv->rx_chan);
2241        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2242                         ENETDMAC_IRMASK, priv->tx_chan);
2243
2244        netif_carrier_on(dev);
2245        netif_start_queue(dev);
2246
2247        /* apply override config for bypass_link ports here. */
2248        for (i = 0; i < priv->num_ports; i++) {
2249                struct bcm63xx_enetsw_port *port;
2250                u8 override;
2251                port = &priv->used_ports[i];
2252                if (!port->used)
2253                        continue;
2254
2255                if (!port->bypass_link)
2256                        continue;
2257
2258                override = ENETSW_PORTOV_ENABLE_MASK |
2259                        ENETSW_PORTOV_LINKUP_MASK;
2260
2261                switch (port->force_speed) {
2262                case 1000:
2263                        override |= ENETSW_IMPOV_1000_MASK;
2264                        break;
2265                case 100:
2266                        override |= ENETSW_IMPOV_100_MASK;
2267                        break;
2268                case 10:
2269                        break;
2270                default:
2271                        pr_warn("invalid forced speed on port %s: assuming 10\n",
2272                               port->name);
2273                        break;
2274                }
2275
2276                if (port->force_duplex_full)
2277                        override |= ENETSW_IMPOV_FDX_MASK;
2278
2279
2280                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2281                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2282        }
2283
2284        /* start phy polling timer */
2285        timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2286        mod_timer(&priv->swphy_poll, jiffies);
2287        return 0;
2288
2289out:
2290        for (i = 0; i < priv->rx_ring_size; i++) {
2291                struct bcm_enet_desc *desc;
2292
2293                if (!priv->rx_skb[i])
2294                        continue;
2295
2296                desc = &priv->rx_desc_cpu[i];
2297                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2298                                 DMA_FROM_DEVICE);
2299                kfree_skb(priv->rx_skb[i]);
2300        }
2301        kfree(priv->rx_skb);
2302
2303out_free_tx_skb:
2304        kfree(priv->tx_skb);
2305
2306out_free_tx_ring:
2307        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2308                          priv->tx_desc_cpu, priv->tx_desc_dma);
2309
2310out_free_rx_ring:
2311        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2312                          priv->rx_desc_cpu, priv->rx_desc_dma);
2313
2314out_freeirq_tx:
2315        if (priv->irq_tx != -1)
2316                free_irq(priv->irq_tx, dev);
2317
2318out_freeirq_rx:
2319        free_irq(priv->irq_rx, dev);
2320
2321out_freeirq:
2322        return ret;
2323}
2324
2325/* stop callback */
2326static int bcm_enetsw_stop(struct net_device *dev)
2327{
2328        struct bcm_enet_priv *priv;
2329        struct device *kdev;
2330        int i;
2331
2332        priv = netdev_priv(dev);
2333        kdev = &priv->pdev->dev;
2334
2335        del_timer_sync(&priv->swphy_poll);
2336        netif_stop_queue(dev);
2337        napi_disable(&priv->napi);
2338        del_timer_sync(&priv->rx_timeout);
2339
2340        /* mask all interrupts */
2341        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2342        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2343
2344        /* disable dma & mac */
2345        bcm_enet_disable_dma(priv, priv->tx_chan);
2346        bcm_enet_disable_dma(priv, priv->rx_chan);
2347
2348        /* force reclaim of all tx buffers */
2349        bcm_enet_tx_reclaim(dev, 1);
2350
2351        /* free the rx skb ring */
2352        for (i = 0; i < priv->rx_ring_size; i++) {
2353                struct bcm_enet_desc *desc;
2354
2355                if (!priv->rx_skb[i])
2356                        continue;
2357
2358                desc = &priv->rx_desc_cpu[i];
2359                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2360                                 DMA_FROM_DEVICE);
2361                kfree_skb(priv->rx_skb[i]);
2362        }
2363
2364        /* free remaining allocated memory */
2365        kfree(priv->rx_skb);
2366        kfree(priv->tx_skb);
2367        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2368                          priv->rx_desc_cpu, priv->rx_desc_dma);
2369        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2370                          priv->tx_desc_cpu, priv->tx_desc_dma);
2371        if (priv->irq_tx != -1)
2372                free_irq(priv->irq_tx, dev);
2373        free_irq(priv->irq_rx, dev);
2374
2375        return 0;
2376}
2377
2378/* try to sort out phy external status by walking the used_ports field
2379 * in the bcm_enet_priv structure. in case the phy address is not
2380 * assigned to any physical port on the switch, assume it is external
2381 * (and yell at the user).
2382 */
2383static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2384{
2385        int i;
2386
2387        for (i = 0; i < priv->num_ports; ++i) {
2388                if (!priv->used_ports[i].used)
2389                        continue;
2390                if (priv->used_ports[i].phy_id == phy_id)
2391                        return bcm_enet_port_is_rgmii(i);
2392        }
2393
2394        printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2395                    phy_id);
2396        return 1;
2397}
2398
2399/* can't use bcmenet_sw_mdio_read directly as we need to sort out
2400 * external/internal status of the given phy_id first.
2401 */
2402static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2403                                    int location)
2404{
2405        struct bcm_enet_priv *priv;
2406
2407        priv = netdev_priv(dev);
2408        return bcmenet_sw_mdio_read(priv,
2409                                    bcm_enetsw_phy_is_external(priv, phy_id),
2410                                    phy_id, location);
2411}
2412
2413/* can't use bcmenet_sw_mdio_write directly as we need to sort out
2414 * external/internal status of the given phy_id first.
2415 */
2416static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2417                                      int location,
2418                                      int val)
2419{
2420        struct bcm_enet_priv *priv;
2421
2422        priv = netdev_priv(dev);
2423        bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2424                              phy_id, location, val);
2425}
2426
2427static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428{
2429        struct mii_if_info mii;
2430
2431        mii.dev = dev;
2432        mii.mdio_read = bcm_enetsw_mii_mdio_read;
2433        mii.mdio_write = bcm_enetsw_mii_mdio_write;
2434        mii.phy_id = 0;
2435        mii.phy_id_mask = 0x3f;
2436        mii.reg_num_mask = 0x1f;
2437        return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2438
2439}
2440
2441static const struct net_device_ops bcm_enetsw_ops = {
2442        .ndo_open               = bcm_enetsw_open,
2443        .ndo_stop               = bcm_enetsw_stop,
2444        .ndo_start_xmit         = bcm_enet_start_xmit,
2445        .ndo_change_mtu         = bcm_enet_change_mtu,
2446        .ndo_do_ioctl           = bcm_enetsw_ioctl,
2447};
2448
2449
2450static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2451        { "rx_packets", DEV_STAT(rx_packets), -1 },
2452        { "tx_packets", DEV_STAT(tx_packets), -1 },
2453        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2454        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2455        { "rx_errors", DEV_STAT(rx_errors), -1 },
2456        { "tx_errors", DEV_STAT(tx_errors), -1 },
2457        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2458        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2459
2460        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2461        { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2462        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2463        { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2464        { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2465        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2466        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2467        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2468        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2469        { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2470          ETHSW_MIB_RX_1024_1522 },
2471        { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2472          ETHSW_MIB_RX_1523_2047 },
2473        { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2474          ETHSW_MIB_RX_2048_4095 },
2475        { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2476          ETHSW_MIB_RX_4096_8191 },
2477        { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2478          ETHSW_MIB_RX_8192_9728 },
2479        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2480        { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2481        { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2482        { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2483        { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2484
2485        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2486        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2487        { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2488        { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2489        { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2490        { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2491
2492};
2493
2494#define BCM_ENETSW_STATS_LEN    \
2495        (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
2496
2497static void bcm_enetsw_get_strings(struct net_device *netdev,
2498                                   u32 stringset, u8 *data)
2499{
2500        int i;
2501
2502        switch (stringset) {
2503        case ETH_SS_STATS:
2504                for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2505                        memcpy(data + i * ETH_GSTRING_LEN,
2506                               bcm_enetsw_gstrings_stats[i].stat_string,
2507                               ETH_GSTRING_LEN);
2508                }
2509                break;
2510        }
2511}
2512
2513static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2514                                     int string_set)
2515{
2516        switch (string_set) {
2517        case ETH_SS_STATS:
2518                return BCM_ENETSW_STATS_LEN;
2519        default:
2520                return -EINVAL;
2521        }
2522}
2523
2524static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2525                                   struct ethtool_drvinfo *drvinfo)
2526{
2527        strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2528        strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2529}
2530
2531static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2532                                         struct ethtool_stats *stats,
2533                                         u64 *data)
2534{
2535        struct bcm_enet_priv *priv;
2536        int i;
2537
2538        priv = netdev_priv(netdev);
2539
2540        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2541                const struct bcm_enet_stats *s;
2542                u32 lo, hi;
2543                char *p;
2544                int reg;
2545
2546                s = &bcm_enetsw_gstrings_stats[i];
2547
2548                reg = s->mib_reg;
2549                if (reg == -1)
2550                        continue;
2551
2552                lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2553                p = (char *)priv + s->stat_offset;
2554
2555                if (s->sizeof_stat == sizeof(u64)) {
2556                        hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2557                        *(u64 *)p = ((u64)hi << 32 | lo);
2558                } else {
2559                        *(u32 *)p = lo;
2560                }
2561        }
2562
2563        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2564                const struct bcm_enet_stats *s;
2565                char *p;
2566
2567                s = &bcm_enetsw_gstrings_stats[i];
2568
2569                if (s->mib_reg == -1)
2570                        p = (char *)&netdev->stats + s->stat_offset;
2571                else
2572                        p = (char *)priv + s->stat_offset;
2573
2574                data[i] = (s->sizeof_stat == sizeof(u64)) ?
2575                        *(u64 *)p : *(u32 *)p;
2576        }
2577}
2578
2579static void bcm_enetsw_get_ringparam(struct net_device *dev,
2580                                     struct ethtool_ringparam *ering)
2581{
2582        struct bcm_enet_priv *priv;
2583
2584        priv = netdev_priv(dev);
2585
2586        /* rx/tx ring is actually only limited by memory */
2587        ering->rx_max_pending = 8192;
2588        ering->tx_max_pending = 8192;
2589        ering->rx_mini_max_pending = 0;
2590        ering->rx_jumbo_max_pending = 0;
2591        ering->rx_pending = priv->rx_ring_size;
2592        ering->tx_pending = priv->tx_ring_size;
2593}
2594
2595static int bcm_enetsw_set_ringparam(struct net_device *dev,
2596                                    struct ethtool_ringparam *ering)
2597{
2598        struct bcm_enet_priv *priv;
2599        int was_running;
2600
2601        priv = netdev_priv(dev);
2602
2603        was_running = 0;
2604        if (netif_running(dev)) {
2605                bcm_enetsw_stop(dev);
2606                was_running = 1;
2607        }
2608
2609        priv->rx_ring_size = ering->rx_pending;
2610        priv->tx_ring_size = ering->tx_pending;
2611
2612        if (was_running) {
2613                int err;
2614
2615                err = bcm_enetsw_open(dev);
2616                if (err)
2617                        dev_close(dev);
2618        }
2619        return 0;
2620}
2621
2622static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2623        .get_strings            = bcm_enetsw_get_strings,
2624        .get_sset_count         = bcm_enetsw_get_sset_count,
2625        .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2626        .get_drvinfo            = bcm_enetsw_get_drvinfo,
2627        .get_ringparam          = bcm_enetsw_get_ringparam,
2628        .set_ringparam          = bcm_enetsw_set_ringparam,
2629};
2630
2631/* allocate netdevice, request register memory and register device. */
2632static int bcm_enetsw_probe(struct platform_device *pdev)
2633{
2634        struct bcm_enet_priv *priv;
2635        struct net_device *dev;
2636        struct bcm63xx_enetsw_platform_data *pd;
2637        struct resource *res_mem;
2638        int ret, irq_rx, irq_tx;
2639
2640        if (!bcm_enet_shared_base[0])
2641                return -EPROBE_DEFER;
2642
2643        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2644        irq_rx = platform_get_irq(pdev, 0);
2645        irq_tx = platform_get_irq(pdev, 1);
2646        if (!res_mem || irq_rx < 0)
2647                return -ENODEV;
2648
2649        ret = 0;
2650        dev = alloc_etherdev(sizeof(*priv));
2651        if (!dev)
2652                return -ENOMEM;
2653        priv = netdev_priv(dev);
2654
2655        /* initialize defaults and fetch platform data */
2656        priv->enet_is_sw = true;
2657        priv->irq_rx = irq_rx;
2658        priv->irq_tx = irq_tx;
2659        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2660        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2661        priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2662
2663        pd = dev_get_platdata(&pdev->dev);
2664        if (pd) {
2665                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2666                memcpy(priv->used_ports, pd->used_ports,
2667                       sizeof(pd->used_ports));
2668                priv->num_ports = pd->num_ports;
2669                priv->dma_has_sram = pd->dma_has_sram;
2670                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2671                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2672                priv->dma_chan_width = pd->dma_chan_width;
2673        }
2674
2675        ret = bcm_enet_change_mtu(dev, dev->mtu);
2676        if (ret)
2677                goto out;
2678
2679        priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2680        if (IS_ERR(priv->base)) {
2681                ret = PTR_ERR(priv->base);
2682                goto out;
2683        }
2684
2685        priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2686        if (IS_ERR(priv->mac_clk)) {
2687                ret = PTR_ERR(priv->mac_clk);
2688                goto out;
2689        }
2690        ret = clk_prepare_enable(priv->mac_clk);
2691        if (ret)
2692                goto out;
2693
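            /* the switch block uses fixed DMA channels: 0 for rx, 1 for tx */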
2694        priv->rx_chan = 0;
2695        priv->tx_chan = 1;
2696        spin_lock_init(&priv->rx_lock);
2697
2698        /* init rx timeout (used for oom) */
2699        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2700
2701        /* register netdevice */
2702        dev->netdev_ops = &bcm_enetsw_ops;
2703        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2704        dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2705        SET_NETDEV_DEV(dev, &pdev->dev);
2706
2707        spin_lock_init(&priv->enetsw_mdio_lock);
2708
2709        ret = register_netdev(dev);
2710        if (ret)
2711                goto out_disable_clk;
2712
2713        netif_carrier_off(dev);
2714        platform_set_drvdata(pdev, dev);
2715        priv->pdev = pdev;
2716        priv->net_dev = dev;
2717
2718        return 0;
2719
2720out_disable_clk:
2721        clk_disable_unprepare(priv->mac_clk);
2722out:
2723        free_netdev(dev);
2724        return ret;
2725}
2726
2727
2728/* exit func, stops hardware and unregisters netdevice */
2729static int bcm_enetsw_remove(struct platform_device *pdev)
2730{
2731        struct bcm_enet_priv *priv;
2732        struct net_device *dev;
2733
2734        /* stop netdevice */
2735        dev = platform_get_drvdata(pdev);
2736        priv = netdev_priv(dev);
2737        unregister_netdev(dev);
2738
2739        clk_disable_unprepare(priv->mac_clk);
2740
2741        free_netdev(dev);
2742        return 0;
2743}
2744
2745struct platform_driver bcm63xx_enetsw_driver = {
2746        .probe  = bcm_enetsw_probe,
2747        .remove = bcm_enetsw_remove,
2748        .driver = {
2749                .name   = "bcm63xx_enetsw",
2750                .owner  = THIS_MODULE,
2751        },
2752};
2753
2754/* reserve & remap memory space shared between all macs */
2755static int bcm_enet_shared_probe(struct platform_device *pdev)
2756{
2757        void __iomem *p[3];
2758        unsigned int i;
2759
2760        memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2761
2762        for (i = 0; i < 3; i++) {
2763                p[i] = devm_platform_ioremap_resource(pdev, i);
2764                if (IS_ERR(p[i]))
2765                        return PTR_ERR(p[i]);
2766        }
2767
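            /* publish the mappings only once all three regions are remapped */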
2768        memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2769
2770        return 0;
2771}
2772
2773static int bcm_enet_shared_remove(struct platform_device *pdev)
2774{
2775        return 0;
2776}
2777
2778/* this "shared" driver is needed because both macs share a single
2779 * address space
2780 */
2781struct platform_driver bcm63xx_enet_shared_driver = {
2782        .probe  = bcm_enet_shared_probe,
2783        .remove = bcm_enet_shared_remove,
2784        .driver = {
2785                .name   = "bcm63xx_enet_shared",
2786                .owner  = THIS_MODULE,
2787        },
2788};
2789
2790static struct platform_driver * const drivers[] = {
2791        &bcm63xx_enet_shared_driver,
2792        &bcm63xx_enet_driver,
2793        &bcm63xx_enetsw_driver,
2794};
2795
2796/* entry point */
2797static int __init bcm_enet_init(void)
2798{
2799        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2800}
2801
2802static void __exit bcm_enet_exit(void)
2803{
2804        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2805}
2806
2807
2808module_init(bcm_enet_init);
2809module_exit(bcm_enet_exit);
2810
2811MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2812MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2813MODULE_LICENSE("GPL");
2814