linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
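/*
 * [0] is the global dma config block, [1] the per-channel dma channel
 * registers, [2] the per-channel state ram; see the enet_dma_*,
 * enet_dmac_* and enet_dmas_* helpers below
 */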

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                                 u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                                 u16 val, u32 off)
{
        bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                                 u8 val, u32 off)
{
        bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

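        /* build a clause-22 MDIO frame in the MIIDATA register; the
         * field layout (OP, PHYID, REG, TA, DATA[15:0]) follows the
         * ENET_MIIDATA_* shifts used below, with 0x2 as the standard
         * turnaround pattern */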
        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_buf[desc_idx]) {
                        void *buf;

                        if (likely(napi_mode))
                                buf = napi_alloc_frag(priv->rx_frag_size);
                        else
                                buf = netdev_alloc_frag(priv->rx_frag_size);
                        if (unlikely(!buf))
                                break;
                        priv->rx_buf[desc_idx] = buf;
                        desc->address = dma_map_single(&priv->pdev->dev,
                                                       buf + priv->rx_buf_offset,
                                                       priv->rx_buf_size,
                                                       DMA_FROM_DEVICE);
                }

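                /* arm the descriptor: length of the fresh buffer, the
                 * OWNER bit to hand it to the dma engine, and WRAP on
                 * the last descriptor so the engine cycles back to the
                 * ring base */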
                len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
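                /* the buffer address must be visible to the dma engine
                 * before ownership is transferred, hence the barrier
                 * before the len_stat store */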
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                if (priv->dma_has_sram)
                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
                else
                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
        struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
        struct net_device *dev = priv->net_dev;

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev, false);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct list_head rx_list;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        INIT_LIST_HEAD(&rx_list);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan the ring further than the number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;
                void *buf;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status on
                 * each loop iteration */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;

                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
                        (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (!priv->enet_is_sw &&
                    unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                buf = priv->rx_buf[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;

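                /* copybreak: small packets are copied into a fresh skb
                 * so the rx frag stays mapped and is reused in place;
                 * larger packets hand the frag to the stack via
                 * build_skb() and a new frag is allocated on the next
                 * refill */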
                if (len < copybreak) {
                        skb = napi_alloc_skb(&priv->napi, len);
                        if (unlikely(!skb)) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(skb->data, buf + priv->rx_buf_offset, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                } else {
                        dma_unmap_single(kdev, desc->address,
                                         priv->rx_buf_size, DMA_FROM_DEVICE);
                        priv->rx_buf[desc_idx] = NULL;

                        skb = build_skb(buf, priv->rx_frag_size);
                        if (unlikely(!skb)) {
                                skb_free_frag(buf);
                                dev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, priv->rx_buf_offset);
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                list_add_tail(&skb->list, &rx_list);

        } while (processed < budget);

        netif_receive_skb_list(&rx_list);
        priv->rx_desc_count -= processed;

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev, true);

                /* kick rx dma */
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                         ENETDMAC_CHANCFG, priv->rx_chan);
        }

        return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        unsigned int bytes;
        int released;

        priv = netdev_priv(dev);
        bytes = 0;
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                bytes += skb->len;
                dev_kfree_skb(skb);
                released++;
        }

        netdev_completed_queue(dev, released, bytes);

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* reclaim sent skb */
        bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

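        /* NAPI contract: if the whole budget was consumed, return it
         * without completing so the core polls us again; dma
         * interrupts stay masked in the meantime */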
        if (rx_work_done >= budget) {
                /* rx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packets in rx/tx queues, remove device from poll
         * queue */
        napi_complete_done(napi, rx_work_done);

        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

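        /* schedule the softirq half; interrupts stay masked until
         * bcm_enet_poll() re-enables them once the rings are clean */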
        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        netdev_tx_t ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full; this should not
         * happen since we stop the queue before that is the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* pad small packets sent on a switch device */
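        /* (64 bytes is the minimum ethernet frame length including the
         * 4-byte FCS; the switch variant presumably does not pad short
         * frames on its own) */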
        if (priv->enet_is_sw && skb->len < 64) {
                int needed = 64 - skb->len;
                char *data;

                if (unlikely(skb_tailroom(skb) < needed)) {
                        struct sk_buff *nskb;

                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
                        if (!nskb) {
                                ret = NETDEV_TX_BUSY;
                                goto out_unlock;
                        }
                        dev_kfree_skb(skb);
                        skb = nskb;
                }
                data = skb_put_zero(skb, needed);
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        netdev_sent_queue(dev, skb->len);

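        /* under netdev_xmit_more() batching, defer the doorbell to the
         * last skb of the batch, unless the ring just became full and
         * no further xmit will arrive to flush it */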
        /* kick tx dma */
        if (!netdev_xmit_more() || !priv->tx_desc_count)
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                 ENETDMAC_CHANCFG, priv->tx_chan);

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
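        /* PML takes the low 32 bits of the address, PMH the upper 16
         * plus a data-valid flag */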
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers are left; the first one is
         * used for our own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;
                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        if (!priv->dma_has_sram)
                return;

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = dev->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if remote advertises it (trust phylib to
         * check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_buf[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
                                 DMA_FROM_DEVICE);
                skb_free_frag(priv->rx_buf[i]);
        }
        kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mii_bus->id, priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phy_support_sym_pause(phydev);
                phy_set_max_speed(phydev, SPEED_100);
                phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
                                  priv->pause_auto);

                phy_attached_info(phydev);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
        } else {
                phydev = NULL;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
                          dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          0, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with buffers */
        priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
                               GFP_KERNEL);
        if (!priv->rx_buf) {
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
        else
                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMAC_BUFALLOC, priv->rx_chan);

        if (bcm_enet_refill_rx(dev, false)) {
                dev_err(kdev, "cannot allocate rx buffer queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, priv->rx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->rx_chan);
                enet_dmas_writel(priv, priv->tx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, priv->rx_desc_dma,
                                ENETDMAC_RSTART, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_desc_dma,
                                ENETDMAC_RSTART, priv->tx_chan);
        }

        /* clear remaining state ram for rx & tx channel */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
        }

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->tx_chan);

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        if (priv->dma_has_sram) {
                val = priv->rx_ring_size / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
                val = (priv->rx_ring_size * 2) / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
        } else {
                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
        }

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                         ENETDMAC_CHANCFG, priv->rx_chan);

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        if (phydev)
                phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        if (phydev)
                phy_disconnect(phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

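        /* the DISABLE bit is presumably cleared by hardware once the
         * mac has actually stopped; poll for that for up to ~1ms */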
        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

        limit = 1000;
        do {
                u32 val;

                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(dev->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx buffer ring */
        bcm_enet_free_rx_buf_ring(kdev, priv);

        /* free remaining allocated memory */
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy)
                phy_disconnect(dev->phydev);

        /* reset BQL after forced tx reclaim to prevent kernel panic */
        netdev_reset_queue(dev);

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

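/* record a stat field's size and offset; the null pointer casts are
 * only evaluated inside sizeof()/offsetof() and never dereferenced */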
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
                     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                        int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

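        /* the hardware mib counters appear to be clear-on-read: each
         * value is accumulated into the software copy here, and the
         * unused counters are drained below purely to clear the
         * overflow interrupt */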
        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy)
                return phy_ethtool_nway_reset(dev);

        return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;
        u32 supported, advertising;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;

                phy_ethtool_ksettings_get(dev->phydev, cmd);

                return 0;
        } else {
                cmd->base.autoneg = 0;
                cmd->base.speed = (priv->force_speed_100) ?
                        SPEED_100 : SPEED_10;
                cmd->base.duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                advertising = 0;
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.supported, supported);
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.advertising, advertising);
                cmd->base.port = PORT_MII;
        }
        return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
                                       const struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;
                return phy_ethtool_ksettings_set(dev->phydev, cmd);
        } else {

                if (cmd->base.autoneg ||
                    (cmd->base.speed != SPEED_100 &&
                     cmd->base.speed != SPEED_10) ||
                    cmd->base.port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 =
                        (cmd->base.speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full =
                        (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported, it is
			 * actually possible but the integrated PHY has a
			 * read-only asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}
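
/* The pause parameters are only recorded in the private state here;
 * the mac flow-control setup elsewhere in the driver consumes them
 * the next time the link is configured.  A user-space sketch,
 * assuming a phy-attached port where pause autoneg is allowed:
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 */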

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	dev->mtu = new_mtu;
	return 0;
}
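
/* Worked example for the sizing above, assuming the default 1500 byte
 * MTU and a dma_maxburst of 16 words (so a 64 byte alignment):
 *
 *   hw_mtu      = 1500 + VLAN_ETH_HLEN (18)         = 1518
 *   rx_buf_size = ALIGN(1518 + ETH_FCS_LEN (4), 64) = 1536
 *
 * rx_frag_size then adds the rx buffer offset and the skb_shared_info
 * tail, so a whole fragment can be handed to the skb build path.
 */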

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}
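
/* The soft-reset poll above is a bounded busy-wait: the mac clears
 * ENET_CTL_SRESET_MASK once the reset completes, and the loop gives
 * up after ~1000 iterations of udelay(1), i.e. about a millisecond.
 * The result is deliberately not checked; if the mac never leaves
 * reset, the register writes that follow are issued anyway.
 */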

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_eth_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe the bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if no slave is present on the hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}
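
/* The error labels above unwind in reverse order of acquisition, the
 * usual kernel goto-ladder pattern: a failure at step N jumps to the
 * label releasing step N-1 and falls through the remaining labels
 * down to free_netdev().  State obtained through devm_* helpers (the
 * register mapping, the clk handles) is released by the driver core
 * automatically; the explicit labels only cover the non-devm state
 * (clock enables, the mdc clock setup, the mdio bus).
 */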


/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}
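
/* Both mdio helpers above use a fixed udelay(50) instead of polling
 * a completion bit.  At the common 2.5 MHz MDC rate a full 64-bit
 * clause-22 frame takes about 25.6 us, so 50 us would leave roughly
 * a 2x margin; note this timing rationale is an assumption, nothing
 * in the code documents why 50 us was chosen.
 */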

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
					&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}
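
/* The poll handler re-arms itself once a second (jiffies + HZ)
 * instead of using a periodic timer API.  Link parameters are
 * resolved the way phylib would do it: mii_nway_result() picks the
 * best common mode from the AND of the local advertisement and the
 * link partner ability words, with an extra MII_CTRL1000/MII_STAT1000
 * pass for gigabit-capable PHYs.
 */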

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
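	/* Worked example: a hypothetical 512-entry rx ring gives a low
	 * threshold of 170 and a high threshold of 341 descriptors
	 * (integer division); the actual ring size is whatever
	 * priv->rx_ring_size holds, the driver default or an
	 * ethtool -G override.
	 */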

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;
		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}
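
/* Teardown mirrors bcm_enetsw_open() in reverse: stop the polling
 * timer and the queue, quiesce napi and interrupts, halt dma, then
 * reclaim and free buffers, rings and irqs.  As the comment above
 * notes, the BQL reset must come after the forced tx reclaim so the
 * queue accounting is consistent the next time the device is opened.
 */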

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location,
				      int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_eth_ioctl		= bcm_enetsw_ioctl,
};

static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};

#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
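
/* BCM_ENETSW_STATS_LEN is equivalent to
 * ARRAY_SIZE(bcm_enetsw_gstrings_stats), which would be the more
 * idiomatic spelling.
 */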

static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
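
/* The function above works in two passes: the first latches the
 * hardware MIB counters into the private mib structure, reading the
 * high word only for 64-bit counters; the second copies either the
 * latched value or, for entries with mib_reg == -1, the generic
 * netdev counter into the ethtool data array, whose layout simply
 * follows the string table order.
 */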

static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= bcm_enet_shared_remove,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};

static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

/* entry point */
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");