linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
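
/*
 * Note: in the rx path below, frames shorter than copybreak bytes are
 * copied into a freshly allocated skb while the original page fragment
 * stays mapped for reuse; longer frames hand the fragment itself to the
 * stack via build_skb(). The threshold can only be set at module load
 * time (the module_param perm is 0, so there is no sysfs entry), e.g.
 * "modprobe bcm63xx_enet copybreak=256".
 */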

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                                 u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                                 u16 val, u32 off)
{
        bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                                 u8 val, u32 off)
{
        bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[1] +
                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
        return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off, int chan)
{
        bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}
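
/*
 * Note: the callers below assemble a complete MDIO frame in
 * ENET_MIIDATA_REG before kicking do_mdio_op(): an opcode mask (read or
 * write), the PHY address, the register number, the 0x2 turnaround
 * pattern and, for writes, the 16-bit data word. The field positions
 * come from the ENET_MIIDATA_*_SHIFT definitions in bcm63xx_enet.h and
 * match IEEE 802.3 clause-22 framing.
 */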

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_buf[desc_idx]) {
                        void *buf;

                        if (likely(napi_mode))
                                buf = napi_alloc_frag(priv->rx_frag_size);
                        else
                                buf = netdev_alloc_frag(priv->rx_frag_size);
                        if (unlikely(!buf))
                                break;
                        priv->rx_buf[desc_idx] = buf;
                        desc->address = dma_map_single(&priv->pdev->dev,
                                                       buf + priv->rx_buf_offset,
                                                       priv->rx_buf_size,
                                                       DMA_FROM_DEVICE);
                }

                len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                if (priv->dma_has_sram)
                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
                else
                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}
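
/*
 * Note: rx_dirty_desc is the next descriptor to hand back to the
 * hardware, rx_curr_desc (used in the receive path below) is the next
 * one to harvest, and rx_desc_count tracks how many descriptors the
 * hardware currently owns; the refill loop above runs until the whole
 * ring is hardware-owned again or an allocation fails.
 */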

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
        struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
        struct net_device *dev = priv->net_dev;

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev, false);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct list_head rx_list;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        INIT_LIST_HEAD(&rx_list);
        kdev = &priv->pdev->dev;
        processed = 0;
        /* don't scan the ring further than the number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;
                void *buf;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status at
                 * each loop */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;

                /* if the packet does not have start of packet _and_
                 * end of packet flags set, then just recycle it */
                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
                        (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (!priv->enet_is_sw &&
                    unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                buf = priv->rx_buf[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;

                if (len < copybreak) {
                        skb = napi_alloc_skb(&priv->napi, len);
                        if (unlikely(!skb)) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(skb->data, buf + priv->rx_buf_offset, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                } else {
                        dma_unmap_single(kdev, desc->address,
                                         priv->rx_buf_size, DMA_FROM_DEVICE);
                        priv->rx_buf[desc_idx] = NULL;

                        skb = build_skb(buf, priv->rx_frag_size);
                        if (unlikely(!skb)) {
                                skb_free_frag(buf);
                                dev->stats.rx_dropped++;
                                continue;
                        }
                        skb_reserve(skb, priv->rx_buf_offset);
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                list_add_tail(&skb->list, &rx_list);

        } while (processed < budget);

        netif_receive_skb_list(&rx_list);
        priv->rx_desc_count -= processed;

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev, true);

                /* kick rx dma */
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                 ENETDMAC_CHANCFG, priv->rx_chan);
        }

        return processed;
}
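
/*
 * Note: received skbs are collected on a local list and handed to the
 * stack in one netif_receive_skb_list() batch after the ring walk,
 * which amortizes the per-packet entry cost into the network stack.
 */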


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        unsigned int bytes;
        int released;

        priv = netdev_priv(dev);
        bytes = 0;
        released = 0;

        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                bytes += skb->len;
                dev_kfree_skb(skb);
                released++;
        }

        netdev_completed_queue(dev, released, bytes);

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}
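
/*
 * Note: netdev_completed_queue() here pairs with the netdev_sent_queue()
 * call in bcm_enet_start_xmit() (byte queue limits accounting), and the
 * netdev_reset_queue() in bcm_enet_stop() rebalances that accounting
 * after a forced reclaim.
 */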

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

        /* ack interrupts */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* reclaim sent skb */
        bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget) {
                /* rx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packet in rx/tx queue, remove device from poll
         * queue */
        napi_complete_done(napi, rx_work_done);

        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        return rx_work_done;
}
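
/*
 * Note: this follows the usual NAPI contract: returning the full budget
 * keeps the poll scheduled with interrupts still masked, while a return
 * below budget goes through napi_complete_done() and re-arms the rx/tx
 * DMA interrupt masks that the ISR below cleared.
 */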

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        netdev_tx_t ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full; this should not happen
         * since we stop the queue before that's the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* pad small packets sent on a switch device */
        if (priv->enet_is_sw && skb->len < 64) {
                int needed = 64 - skb->len;
                char *data;

                if (unlikely(skb_tailroom(skb) < needed)) {
                        struct sk_buff *nskb;

                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
                        if (!nskb) {
                                ret = NETDEV_TX_BUSY;
                                goto out_unlock;
                        }
                        dev_kfree_skb(skb);
                        skb = nskb;
                }
                data = skb_put_zero(skb, needed);
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        netdev_sent_queue(dev, skb->len);

        /* kick tx dma */
        if (!netdev_xmit_more() || !priv->tx_desc_count)
                enet_dmac_writel(priv, priv->dma_chan_en_mask,
                                 ENETDMAC_CHANCFG, priv->tx_chan);

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        eth_hw_addr_set(dev, addr->sa_data);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}
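
/*
 * Note: each perfect match slot is split across two registers: PML
 * holds the low four bytes of the address and PMH the top two, plus a
 * "data valid" bit. Slot 0 is the interface address set above; slots
 * 1-3 are filled with multicast addresses by
 * bcm_enet_set_multicast_list() below.
 */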

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers are left; the first one is
         * used for our own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;
                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        if (!priv->dma_has_sram)
                return;

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = dev->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if the remote advertises it (trust phylib
         * to check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_buf[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
                                 DMA_FROM_DEVICE);
                skb_free_frag(priv->rx_buf[i]);
        }
        kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mii_bus->id, priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phy_support_sym_pause(phydev);
                phy_set_max_speed(phydev, SPEED_100);
                phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
                                  priv->pause_auto);

                phy_attached_info(phydev);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
        } else {
                phydev = NULL;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
                          dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          0, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with buffers */
        priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
                               GFP_KERNEL);
        if (!priv->rx_buf) {
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
        else
                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                                ENETDMAC_BUFALLOC, priv->rx_chan);

        if (bcm_enet_refill_rx(dev, false)) {
                dev_err(kdev, "cannot allocate rx buffer queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, priv->rx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->rx_chan);
                enet_dmas_writel(priv, priv->tx_desc_dma,
                                 ENETDMAS_RSTART_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, priv->rx_desc_dma,
                                ENETDMAC_RSTART, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_desc_dma,
                                ENETDMAC_RSTART, priv->tx_chan);
        }

        /* clear remaining state ram for rx & tx channel */
        if (priv->dma_has_sram) {
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
        } else {
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
        }

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST, priv->tx_chan);

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        if (priv->dma_has_sram) {
                val = priv->rx_ring_size / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
                val = (priv->rx_ring_size * 2) / 3;
                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
        } else {
                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
        }

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        if (priv->dma_has_sram)
                enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                         ENETDMAC_CHANCFG, priv->rx_chan);

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IR, priv->tx_chan);

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);

        if (phydev)
                phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        if (phydev)
                phy_disconnect(phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

        limit = 1000;
        do {
                u32 val;

                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(dev->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx buffer ring */
        bcm_enet_free_rx_buf_ring(kdev, priv);

        /* free remaining allocated memory */
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy)
                phy_disconnect(dev->phydev);

        /* reset BQL after forced tx reclaim to prevent kernel panic */
        netdev_reset_queue(dev);

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
                     offsetof(struct net_device_stats, m)
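
/*
 * Note: each macro expands to the two initializers that fill the
 * sizeof_stat and stat_offset members of struct bcm_enet_stats above;
 * the ((struct ... *)0)->m idiom only feeds sizeof() and is never
 * dereferenced at runtime.
 */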

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                        int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also read the unused mib counters to make sure the mib
         * counter overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy)
                return phy_ethtool_nway_reset(dev);

        return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;
        u32 supported, advertising;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;

                phy_ethtool_ksettings_get(dev->phydev, cmd);

                return 0;
        } else {
                cmd->base.autoneg = 0;
                cmd->base.speed = (priv->force_speed_100) ?
                        SPEED_100 : SPEED_10;
                cmd->base.duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                advertising = 0;
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.supported, supported);
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.advertising, advertising);
                cmd->base.port = PORT_MII;
        }
        return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
                                       const struct ethtool_link_ksettings *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!dev->phydev)
                        return -ENODEV;
                return phy_ethtool_ksettings_set(dev->phydev, cmd);
        } else {

                if (cmd->base.autoneg ||
                    (cmd->base.speed != SPEED_100 &&
                     cmd->base.speed != SPEED_10) ||
                    cmd->base.port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 =
                        (cmd->base.speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full =
                        (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
                       struct ethtool_ringparam *ering,
                       struct kernel_ethtool_ringparam *kernel_ering,
                       struct netlink_ext_ack *extack)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering,
                                  struct kernel_ethtool_ringparam *kernel_ering,
                                  struct netlink_ext_ack *extack)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1568                        /* asymmetric pause mode not supported;
1569                         * actually possible but the integrated PHY has a
1570                         * read-only asym_pause bit */
1571                        return -EINVAL;
1572                }
1573        } else {
1574                /* no pause autoneg on direct mii connection */
1575                if (ecmd->autoneg)
1576                        return -EINVAL;
1577        }
1578
1579        priv->pause_auto = ecmd->autoneg;
1580        priv->pause_rx = ecmd->rx_pause;
1581        priv->pause_tx = ecmd->tx_pause;
1582
1583        return 0;
1584}
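
/* pause usage sketch (interface name is just an example):
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 *
 * is accepted on the phylib path; asymmetric autonegotiated pause
 * (rx != tx) or any pause autoneg on the direct mii path is
 * rejected with -EINVAL by the checks above
 */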
1585
1586static const struct ethtool_ops bcm_enet_ethtool_ops = {
1587        .get_strings            = bcm_enet_get_strings,
1588        .get_sset_count         = bcm_enet_get_sset_count,
1589        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1590        .nway_reset             = bcm_enet_nway_reset,
1591        .get_drvinfo            = bcm_enet_get_drvinfo,
1592        .get_link               = ethtool_op_get_link,
1593        .get_ringparam          = bcm_enet_get_ringparam,
1594        .set_ringparam          = bcm_enet_set_ringparam,
1595        .get_pauseparam         = bcm_enet_get_pauseparam,
1596        .set_pauseparam         = bcm_enet_set_pauseparam,
1597        .get_link_ksettings     = bcm_enet_get_link_ksettings,
1598        .set_link_ksettings     = bcm_enet_set_link_ksettings,
1599};
1600
1601static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1602{
1603        struct bcm_enet_priv *priv;
1604
1605        priv = netdev_priv(dev);
1606        if (priv->has_phy) {
1607                if (!dev->phydev)
1608                        return -ENODEV;
1609                return phy_mii_ioctl(dev->phydev, rq, cmd);
1610        } else {
1611                struct mii_if_info mii;
1612
1613                mii.dev = dev;
1614                mii.mdio_read = bcm_enet_mdio_read_mii;
1615                mii.mdio_write = bcm_enet_mdio_write_mii;
1616                mii.phy_id = 0;
1617                mii.phy_id_mask = 0x3f;
1618                mii.reg_num_mask = 0x1f;
1619                return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1620        }
1621}
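
/* with phylib attached, mii ioctls are delegated to phy_mii_ioctl();
 * otherwise a stack-local mii_if_info bridges SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG to the raw mdio helpers, with phy address 0 as the
 * default target
 */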
1622
1623/*
1624 * adjust mtu, can't be called while device is running
1625 */
1626static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1627{
1628        struct bcm_enet_priv *priv = netdev_priv(dev);
1629        int actual_mtu = new_mtu;
1630
1631        if (netif_running(dev))
1632                return -EBUSY;
1633
1634        /* add ethernet header + vlan tag size */
1635        actual_mtu += VLAN_ETH_HLEN;
1636
1637        /*
1638         * setup maximum size before we get overflow mark in
1639         * descriptor, note that this will not prevent reception of
1640         * big frames, they will be split into multiple buffers
1641         * anyway
1642         */
1643        priv->hw_mtu = actual_mtu;
1644
1645        /*
1646         * align rx buffer size to dma burst len, account FCS since
1647         * it's appended
1648         */
1649        priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1650                                  priv->dma_maxburst * 4);
1651
1652        priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
1653                                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1654
1655        dev->mtu = new_mtu;
1656        return 0;
1657}
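
/* worked example, assuming dma_maxburst is BCMENET_DMA_MAXBURST (16,
 * in 4-byte units): new_mtu = 1500 gives actual_mtu = 1500 +
 * VLAN_ETH_HLEN (18) = 1518, and rx_buf_size = ALIGN(1518 +
 * ETH_FCS_LEN, 64) = ALIGN(1522, 64) = 1536
 */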
1658
1659/*
1660 * preinit hardware to allow mii operation while device is down
1661 */
1662static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1663{
1664        u32 val;
1665        int limit;
1666
1667        /* make sure mac is disabled */
1668        bcm_enet_disable_mac(priv);
1669
1670        /* soft reset mac */
1671        val = ENET_CTL_SRESET_MASK;
1672        enet_writel(priv, val, ENET_CTL_REG);
1673        wmb();
1674
1675        limit = 1000;
1676        do {
1677                val = enet_readl(priv, ENET_CTL_REG);
1678                if (!(val & ENET_CTL_SRESET_MASK))
1679                        break;
1680                udelay(1);
1681        } while (limit--);
1682
1683        /* select correct mii interface */
1684        val = enet_readl(priv, ENET_CTL_REG);
1685        if (priv->use_external_mii)
1686                val |= ENET_CTL_EPHYSEL_MASK;
1687        else
1688                val &= ~ENET_CTL_EPHYSEL_MASK;
1689        enet_writel(priv, val, ENET_CTL_REG);
1690
1691        /* turn on mdc clock */
1692        enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1693                    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1694
1695        /* set mib counters to self-clear when read */
1696        val = enet_readl(priv, ENET_MIBCTL_REG);
1697        val |= ENET_MIBCTL_RDCLEAR_MASK;
1698        enet_writel(priv, val, ENET_MIBCTL_REG);
1699}
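
/* note: the reset poll above bounds the wait for
 * ENET_CTL_SRESET_MASK to clear at 1000 * udelay(1), i.e. roughly a
 * millisecond, instead of spinning forever on stuck hardware
 */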
1700
1701static const struct net_device_ops bcm_enet_ops = {
1702        .ndo_open               = bcm_enet_open,
1703        .ndo_stop               = bcm_enet_stop,
1704        .ndo_start_xmit         = bcm_enet_start_xmit,
1705        .ndo_set_mac_address    = bcm_enet_set_mac_address,
1706        .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
1707        .ndo_eth_ioctl          = bcm_enet_ioctl,
1708        .ndo_change_mtu         = bcm_enet_change_mtu,
1709};
1710
1711/*
1712 * allocate netdevice, request register memory and register device.
1713 */
1714static int bcm_enet_probe(struct platform_device *pdev)
1715{
1716        struct bcm_enet_priv *priv;
1717        struct net_device *dev;
1718        struct bcm63xx_enet_platform_data *pd;
1719        int irq, irq_rx, irq_tx;
1720        struct mii_bus *bus;
1721        int i, ret;
1722
1723        if (!bcm_enet_shared_base[0])
1724                return -EPROBE_DEFER;
1725
1726        irq = platform_get_irq(pdev, 0);
1727        irq_rx = platform_get_irq(pdev, 1);
1728        irq_tx = platform_get_irq(pdev, 2);
1729        if (irq < 0 || irq_rx < 0 || irq_tx < 0)
1730                return -ENODEV;
1731
1732        dev = alloc_etherdev(sizeof(*priv));
1733        if (!dev)
1734                return -ENOMEM;
1735        priv = netdev_priv(dev);
1736
1737        priv->enet_is_sw = false;
1738        priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1739        priv->rx_buf_offset = NET_SKB_PAD;
1740
1741        ret = bcm_enet_change_mtu(dev, dev->mtu);
1742        if (ret)
1743                goto out;
1744
1745        priv->base = devm_platform_ioremap_resource(pdev, 0);
1746        if (IS_ERR(priv->base)) {
1747                ret = PTR_ERR(priv->base);
1748                goto out;
1749        }
1750
1751        dev->irq = priv->irq = irq;
1752        priv->irq_rx = irq_rx;
1753        priv->irq_tx = irq_tx;
1754
1755        priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1756        if (IS_ERR(priv->mac_clk)) {
1757                ret = PTR_ERR(priv->mac_clk);
1758                goto out;
1759        }
1760        ret = clk_prepare_enable(priv->mac_clk);
1761        if (ret)
1762                goto out;
1763
1764        /* initialize default and fetch platform data */
1765        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1766        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1767
1768        pd = dev_get_platdata(&pdev->dev);
1769        if (pd) {
1770                eth_hw_addr_set(dev, pd->mac_addr);
1771                priv->has_phy = pd->has_phy;
1772                priv->phy_id = pd->phy_id;
1773                priv->has_phy_interrupt = pd->has_phy_interrupt;
1774                priv->phy_interrupt = pd->phy_interrupt;
1775                priv->use_external_mii = !pd->use_internal_phy;
1776                priv->pause_auto = pd->pause_auto;
1777                priv->pause_rx = pd->pause_rx;
1778                priv->pause_tx = pd->pause_tx;
1779                priv->force_duplex_full = pd->force_duplex_full;
1780                priv->force_speed_100 = pd->force_speed_100;
1781                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1782                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1783                priv->dma_chan_width = pd->dma_chan_width;
1784                priv->dma_has_sram = pd->dma_has_sram;
1785                priv->dma_desc_shift = pd->dma_desc_shift;
1786                priv->rx_chan = pd->rx_chan;
1787                priv->tx_chan = pd->tx_chan;
1788        }
1789
1790        if (priv->has_phy && !priv->use_external_mii) {
1791                /* using internal PHY, enable clock */
1792                priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1793                if (IS_ERR(priv->phy_clk)) {
1794                        ret = PTR_ERR(priv->phy_clk);
1795                        priv->phy_clk = NULL;
1796                        goto out_disable_clk_mac;
1797                }
1798                ret = clk_prepare_enable(priv->phy_clk);
1799                if (ret)
1800                        goto out_disable_clk_mac;
1801        }
1802
1803        /* do minimal hardware init to be able to probe mii bus */
1804        bcm_enet_hw_preinit(priv);
1805
1806        /* MII bus registration */
1807        if (priv->has_phy) {
1809                priv->mii_bus = mdiobus_alloc();
1810                if (!priv->mii_bus) {
1811                        ret = -ENOMEM;
1812                        goto out_uninit_hw;
1813                }
1814
1815                bus = priv->mii_bus;
1816                bus->name = "bcm63xx_enet MII bus";
1817                bus->parent = &pdev->dev;
1818                bus->priv = priv;
1819                bus->read = bcm_enet_mdio_read_phylib;
1820                bus->write = bcm_enet_mdio_write_phylib;
1821                sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1822
1823                /* only probe the bus where we think the PHY is, because
1824                 * the mdio read operation returns 0 instead of 0xffff
1825                 * if a slave is not present on hw */
1826                bus->phy_mask = ~(1 << priv->phy_id);
1827
1828                if (priv->has_phy_interrupt)
1829                        bus->irq[priv->phy_id] = priv->phy_interrupt;
1830
1831                ret = mdiobus_register(bus);
1832                if (ret) {
1833                        dev_err(&pdev->dev, "unable to register mdio bus\n");
1834                        goto out_free_mdio;
1835                }
1836        } else {
1838                /* run platform code to initialize PHY device */
1839                if (pd && pd->mii_config &&
1840                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1841                                   bcm_enet_mdio_write_mii)) {
1842                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
                            ret = -ENODEV;	/* ret may still be 0 here */
1843                        goto out_uninit_hw;
1844                }
1845        }
1846
1847        spin_lock_init(&priv->rx_lock);
1848
1849        /* init rx timeout (used for oom) */
1850        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1851
1852        /* init the mib update lock & work */
1853        mutex_init(&priv->mib_update_lock);
1854        INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1855
1856        /* zero mib counters */
1857        for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1858                enet_writel(priv, 0, ENET_MIB_REG(i));
1859
1860        /* register netdevice */
1861        dev->netdev_ops = &bcm_enet_ops;
1862        netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
1863
1864        dev->ethtool_ops = &bcm_enet_ethtool_ops;
1865        /* MTU range: 46 - 2028 */
1866        dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1867        dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1868        SET_NETDEV_DEV(dev, &pdev->dev);
1869
1870        ret = register_netdev(dev);
1871        if (ret)
1872                goto out_unregister_mdio;
1873
1874        netif_carrier_off(dev);
1875        platform_set_drvdata(pdev, dev);
1876        priv->pdev = pdev;
1877        priv->net_dev = dev;
1878
1879        return 0;
1880
1881out_unregister_mdio:
1882        if (priv->mii_bus)
1883                mdiobus_unregister(priv->mii_bus);
1884
1885out_free_mdio:
1886        if (priv->mii_bus)
1887                mdiobus_free(priv->mii_bus);
1888
1889out_uninit_hw:
1890        /* turn off mdc clock */
1891        enet_writel(priv, 0, ENET_MIISC_REG);
1892        clk_disable_unprepare(priv->phy_clk);
1893
1894out_disable_clk_mac:
1895        clk_disable_unprepare(priv->mac_clk);
1896out:
1897        free_netdev(dev);
1898        return ret;
1899}
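
/* the error labels above unwind in reverse order of acquisition;
 * clk_disable_unprepare() treats a NULL clk as a no-op, which is why
 * out_uninit_hw is safe when no "ephy" clock was requested and why
 * priv->phy_clk is reset to NULL on devm_clk_get() failure
 */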
1900
1902/*
1903 * exit func, stops hardware and unregisters netdevice
1904 */
1905static int bcm_enet_remove(struct platform_device *pdev)
1906{
1907        struct bcm_enet_priv *priv;
1908        struct net_device *dev;
1909
1910        /* stop netdevice */
1911        dev = platform_get_drvdata(pdev);
1912        priv = netdev_priv(dev);
1913        unregister_netdev(dev);
1914
1915        /* turn off mdc clock */
1916        enet_writel(priv, 0, ENET_MIISC_REG);
1917
1918        if (priv->has_phy) {
1919                mdiobus_unregister(priv->mii_bus);
1920                mdiobus_free(priv->mii_bus);
1921        } else {
1922                struct bcm63xx_enet_platform_data *pd;
1923
1924                pd = dev_get_platdata(&pdev->dev);
1925                if (pd && pd->mii_config)
1926                        pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1927                                       bcm_enet_mdio_write_mii);
1928        }
1929
1930        /* disable hw block clocks */
1931        clk_disable_unprepare(priv->phy_clk);
1932        clk_disable_unprepare(priv->mac_clk);
1933
1934        free_netdev(dev);
1935        return 0;
1936}
1937
1938struct platform_driver bcm63xx_enet_driver = {
1939        .probe  = bcm_enet_probe,
1940        .remove = bcm_enet_remove,
1941        .driver = {
1942                .name   = "bcm63xx_enet",
1943                .owner  = THIS_MODULE,
1944        },
1945};
1946
1947/*
1948 * switch mii access callbacks
1949 */
1950static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1951                                int ext, int phy_id, int location)
1952{
1953        u32 reg;
1954        int ret;
1955
1956        spin_lock_bh(&priv->enetsw_mdio_lock);
1957        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1958
1959        reg = ENETSW_MDIOC_RD_MASK |
1960                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1961                (location << ENETSW_MDIOC_REG_SHIFT);
1962
1963        if (ext)
1964                reg |= ENETSW_MDIOC_EXT_MASK;
1965
1966        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1967        udelay(50);
1968        ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1969        spin_unlock_bh(&priv->enetsw_mdio_lock);
1970        return ret;
1971}
1972
1973static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1974                                 int ext, int phy_id, int location,
1975                                 uint16_t data)
1976{
1977        u32 reg;
1978
1979        spin_lock_bh(&priv->enetsw_mdio_lock);
1980        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1981
1982        reg = ENETSW_MDIOC_WR_MASK |
1983                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1984                (location << ENETSW_MDIOC_REG_SHIFT);
1985
1986        if (ext)
1987                reg |= ENETSW_MDIOC_EXT_MASK;
1988
1989        reg |= data;
1990
1991        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1992        udelay(50);
1993        spin_unlock_bh(&priv->enetsw_mdio_lock);
1994}
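
/* both mdio helpers above use a fixed 50us turnaround instead of
 * polling a completion flag: the command is latched on the write to
 * ENETSW_MDIOC_REG and the result is assumed stable once the delay
 * expires, with enetsw_mdio_lock serializing accesses
 */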
1995
1996static inline int bcm_enet_port_is_rgmii(int portid)
1997{
1998        return portid >= ENETSW_RGMII_PORT0;
1999}
2000
2001/*
2002 * enet sw PHY polling
2003 */
2004static void swphy_poll_timer(struct timer_list *t)
2005{
2006        struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
2007        unsigned int i;
2008
2009        for (i = 0; i < priv->num_ports; i++) {
2010                struct bcm63xx_enetsw_port *port;
2011                int val, j, up, advertise, lpa, speed, duplex, media;
2012                int external_phy = bcm_enet_port_is_rgmii(i);
2013                u8 override;
2014
2015                port = &priv->used_ports[i];
2016                if (!port->used)
2017                        continue;
2018
2019                if (port->bypass_link)
2020                        continue;
2021
2022                /* read twice: the BMSR link bit is latched low, so the
                     * first read may report a stale link state */
2023                for (j = 0; j < 2; j++)
2024                        val = bcmenet_sw_mdio_read(priv, external_phy,
2025                                                   port->phy_id, MII_BMSR);
2026
2027                if (val == 0xffff)
2028                        continue;
2029
2030                up = (val & BMSR_LSTATUS) ? 1 : 0;
2031                if (!(up ^ priv->sw_port_link[i]))
2032                        continue;
2033
2034                priv->sw_port_link[i] = up;
2035
2036                /* link changed */
2037                if (!up) {
2038                        dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2039                                 port->name);
2040                        enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2041                                      ENETSW_PORTOV_REG(i));
2042                        enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2043                                      ENETSW_PTCTRL_TXDIS_MASK,
2044                                      ENETSW_PTCTRL_REG(i));
2045                        continue;
2046                }
2047
2048                advertise = bcmenet_sw_mdio_read(priv, external_phy,
2049                                                 port->phy_id, MII_ADVERTISE);
2050
2051                lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2052                                           MII_LPA);
2053
2054                /* figure out media and duplex from advertise and LPA values */
2055                media = mii_nway_result(lpa & advertise);
2056                duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2057
2058                if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2059                        speed = 100;
2060                else
2061                        speed = 10;
2062
2063                if (val & BMSR_ESTATEN) {
2064                        advertise = bcmenet_sw_mdio_read(priv, external_phy,
2065                                                port->phy_id, MII_CTRL1000);
2066
2067                        lpa = bcmenet_sw_mdio_read(priv, external_phy,
2068                                                port->phy_id, MII_STAT1000);
2069
2070                        if ((advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
2071                            (lpa & (LPA_1000FULL | LPA_1000HALF))) {
2072                                speed = 1000;
2073                                duplex = (lpa & LPA_1000FULL);
2074                        }
2075                }
2076
2077                dev_info(&priv->pdev->dev,
2078                         "link UP on %s, %dMbps, %s-duplex\n",
2079                         port->name, speed, duplex ? "full" : "half");
2080
2081                override = ENETSW_PORTOV_ENABLE_MASK |
2082                        ENETSW_PORTOV_LINKUP_MASK;
2083
2084                if (speed == 1000)
2085                        override |= ENETSW_IMPOV_1000_MASK;
2086                else if (speed == 100)
2087                        override |= ENETSW_IMPOV_100_MASK;
2088                if (duplex)
2089                        override |= ENETSW_IMPOV_FDX_MASK;
2090
2091                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2092                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2093        }
2094
2095        priv->swphy_poll.expires = jiffies + HZ;
2096        add_timer(&priv->swphy_poll);
2097}
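
/* worked example for the resolution above: if both link partners
 * advertise 10/100 at half and full duplex, (lpa & advertise)
 * contains ADVERTISE_100FULL, mii_nway_result() returns it as the
 * highest common mode, and we end up with speed 100, full duplex
 */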
2098
2099/*
2100 * open callback, allocate dma rings & buffers and start rx operation
2101 */
2102static int bcm_enetsw_open(struct net_device *dev)
2103{
2104        struct bcm_enet_priv *priv;
2105        struct device *kdev;
2106        int i, ret;
2107        unsigned int size;
2108        void *p;
2109        u32 val;
2110
2111        priv = netdev_priv(dev);
2112        kdev = &priv->pdev->dev;
2113
2114        /* mask all interrupts and request them */
2115        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2116        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2117
2118        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2119                          0, dev->name, dev);
2120        if (ret)
2121                goto out_freeirq;
2122
2123        if (priv->irq_tx >= 0) {
2124                ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2125                                  0, dev->name, dev);
2126                if (ret)
2127                        goto out_freeirq_rx;
2128        }
2129
2130        /* allocate rx dma ring */
2131        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2132        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2133        if (!p) {
2134                dev_err(kdev, "cannot allocate rx ring %u\n", size);
2135                ret = -ENOMEM;
2136                goto out_freeirq_tx;
2137        }
2138
2139        priv->rx_desc_alloc_size = size;
2140        priv->rx_desc_cpu = p;
2141
2142        /* allocate tx dma ring */
2143        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2144        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2145        if (!p) {
2146                dev_err(kdev, "cannot allocate tx ring\n");
2147                ret = -ENOMEM;
2148                goto out_free_rx_ring;
2149        }
2150
2151        priv->tx_desc_alloc_size = size;
2152        priv->tx_desc_cpu = p;
2153
2154        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2155                               GFP_KERNEL);
2156        if (!priv->tx_skb) {
2157                dev_err(kdev, "cannot allocate tx skb queue\n");
2158                ret = -ENOMEM;
2159                goto out_free_tx_ring;
2160        }
2161
2162        priv->tx_desc_count = priv->tx_ring_size;
2163        priv->tx_dirty_desc = 0;
2164        priv->tx_curr_desc = 0;
2165        spin_lock_init(&priv->tx_lock);
2166
2167        /* init & fill rx ring with buffers */
2168        priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
2169                               GFP_KERNEL);
2170        if (!priv->rx_buf) {
2171                dev_err(kdev, "cannot allocate rx buffer queue\n");
2172                ret = -ENOMEM;
2173                goto out_free_tx_skb;
2174        }
2175
2176        priv->rx_desc_count = 0;
2177        priv->rx_dirty_desc = 0;
2178        priv->rx_curr_desc = 0;
2179
2180        /* disable all ports */
2181        for (i = 0; i < priv->num_ports; i++) {
2182                enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2183                              ENETSW_PORTOV_REG(i));
2184                enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2185                              ENETSW_PTCTRL_TXDIS_MASK,
2186                              ENETSW_PTCTRL_REG(i));
2187
2188                priv->sw_port_link[i] = 0;
2189        }
2190
2191        /* reset mib */
2192        val = enetsw_readb(priv, ENETSW_GMCR_REG);
2193        val |= ENETSW_GMCR_RST_MIB_MASK;
2194        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2195        mdelay(1);
2196        val &= ~ENETSW_GMCR_RST_MIB_MASK;
2197        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2198        mdelay(1);
2199
2200        /* force CPU port state */
2201        val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2202        val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2203        enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2204
2205        /* enable switch forward engine */
2206        val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2207        val |= ENETSW_SWMODE_FWD_EN_MASK;
2208        enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2209
2210        /* enable jumbo on all ports */
2211        enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2212        enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
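            /* 0x1ff presumably enables jumbo on the 8 external ports plus
             * the IMP port (an assumption about the port mask layout);
             * 9728 matches the largest MIB size bucket (*_8192_9728) */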
2213
2214        /* initialize flow control buffer allocation */
2215        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2216                        ENETDMA_BUFALLOC_REG(priv->rx_chan));
2217
2218        if (bcm_enet_refill_rx(dev, false)) {
2219                dev_err(kdev, "cannot allocate rx buffer queue\n");
2220                ret = -ENOMEM;
2221                goto out;
2222        }
2223
2224        /* write rx & tx ring addresses */
2225        enet_dmas_writel(priv, priv->rx_desc_dma,
2226                         ENETDMAS_RSTART_REG, priv->rx_chan);
2227        enet_dmas_writel(priv, priv->tx_desc_dma,
2228                         ENETDMAS_RSTART_REG, priv->tx_chan);
2229
2230        /* clear remaining state ram for rx & tx channel */
2231        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2232        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2233        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2234        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2235        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2236        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2237
2238        /* set dma maximum burst len */
2239        enet_dmac_writel(priv, priv->dma_maxburst,
2240                         ENETDMAC_MAXBURST, priv->rx_chan);
2241        enet_dmac_writel(priv, priv->dma_maxburst,
2242                         ENETDMAC_MAXBURST, priv->tx_chan);
2243
2244        /* set flow control low/high threshold to 1/3 / 2/3 */
2245        val = priv->rx_ring_size / 3;
2246        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2247        val = (priv->rx_ring_size * 2) / 3;
2248        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2249
2250        /* all set, enable mac and interrupts, start dma engine and
2251         * kick rx dma channel
2252         */
2253        wmb();
2254        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2255        enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2256                         ENETDMAC_CHANCFG, priv->rx_chan);
2257
2258        /* watch "packet transferred" interrupt in rx and tx */
2259        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2260                         ENETDMAC_IR, priv->rx_chan);
2261        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2262                         ENETDMAC_IR, priv->tx_chan);
2263
2264        /* make sure we enable napi before rx interrupt  */
2265        napi_enable(&priv->napi);
2266
2267        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2268                         ENETDMAC_IRMASK, priv->rx_chan);
2269        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2270                         ENETDMAC_IRMASK, priv->tx_chan);
2271
2272        netif_carrier_on(dev);
2273        netif_start_queue(dev);
2274
2275        /* apply override config for bypass_link ports here. */
2276        for (i = 0; i < priv->num_ports; i++) {
2277                struct bcm63xx_enetsw_port *port;
2278                u8 override;
2279                port = &priv->used_ports[i];
2280                if (!port->used)
2281                        continue;
2282
2283                if (!port->bypass_link)
2284                        continue;
2285
2286                override = ENETSW_PORTOV_ENABLE_MASK |
2287                        ENETSW_PORTOV_LINKUP_MASK;
2288
2289                switch (port->force_speed) {
2290                case 1000:
2291                        override |= ENETSW_IMPOV_1000_MASK;
2292                        break;
2293                case 100:
2294                        override |= ENETSW_IMPOV_100_MASK;
2295                        break;
2296                case 10:
2297                        break;
2298                default:
2299                        pr_warn("invalid forced speed on port %s: assuming 10\n",
2300                                port->name);
2301                        break;
2302                }
2303
2304                if (port->force_duplex_full)
2305                        override |= ENETSW_IMPOV_FDX_MASK;
2306
2308                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2309                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2310        }
2311
2312        /* start phy polling timer */
2313        timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2314        mod_timer(&priv->swphy_poll, jiffies);
2315        return 0;
2316
2317out:
2318        bcm_enet_free_rx_buf_ring(kdev, priv);
2319
2320out_free_tx_skb:
2321        kfree(priv->tx_skb);
2322
2323out_free_tx_ring:
2324        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2325                          priv->tx_desc_cpu, priv->tx_desc_dma);
2326
2327out_free_rx_ring:
2328        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2329                          priv->rx_desc_cpu, priv->rx_desc_dma);
2330
2331out_freeirq_tx:
2332        if (priv->irq_tx >= 0)
2333                free_irq(priv->irq_tx, dev);
2334
2335out_freeirq_rx:
2336        free_irq(priv->irq_rx, dev);
2337
2338out_freeirq:
2339        return ret;
2340}
2341
2342/* stop callback */
2343static int bcm_enetsw_stop(struct net_device *dev)
2344{
2345        struct bcm_enet_priv *priv;
2346        struct device *kdev;
2347
2348        priv = netdev_priv(dev);
2349        kdev = &priv->pdev->dev;
2350
2351        del_timer_sync(&priv->swphy_poll);
2352        netif_stop_queue(dev);
2353        napi_disable(&priv->napi);
2354        del_timer_sync(&priv->rx_timeout);
2355
2356        /* mask all interrupts */
2357        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2358        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2359
2360        /* disable dma & mac */
2361        bcm_enet_disable_dma(priv, priv->tx_chan);
2362        bcm_enet_disable_dma(priv, priv->rx_chan);
2363
2364        /* force reclaim of all tx buffers */
2365        bcm_enet_tx_reclaim(dev, 1);
2366
2367        /* free the rx buffer ring */
2368        bcm_enet_free_rx_buf_ring(kdev, priv);
2369
2370        /* free remaining allocated memory */
2371        kfree(priv->tx_skb);
2372        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2373                          priv->rx_desc_cpu, priv->rx_desc_dma);
2374        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2375                          priv->tx_desc_cpu, priv->tx_desc_dma);
2376        if (priv->irq_tx >= 0)
2377                free_irq(priv->irq_tx, dev);
2378        free_irq(priv->irq_rx, dev);
2379
2380        /* reset BQL after forced tx reclaim to prevent kernel panic */
2381        netdev_reset_queue(dev);
2382
2383        return 0;
2384}
2385
2386/* try to sort out the phy external status by walking the used_ports
2387 * field in the bcm_enet_priv structure. in case the phy address is
2388 * not assigned to any physical port on the switch, assume it is
2389 * external (and yell at the user).
2390 */
2391static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2392{
2393        int i;
2394
2395        for (i = 0; i < priv->num_ports; ++i) {
2396                if (!priv->used_ports[i].used)
2397                        continue;
2398                if (priv->used_ports[i].phy_id == phy_id)
2399                        return bcm_enet_port_is_rgmii(i);
2400        }
2401
2402        printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2403                    phy_id);
2404        return 1;
2405}
2406
2407/* can't use bcmenet_sw_mdio_read directly as we need to sort out
2408 * external/internal status of the given phy_id first.
2409 */
2410static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2411                                    int location)
2412{
2413        struct bcm_enet_priv *priv;
2414
2415        priv = netdev_priv(dev);
2416        return bcmenet_sw_mdio_read(priv,
2417                                    bcm_enetsw_phy_is_external(priv, phy_id),
2418                                    phy_id, location);
2419}
2420
2421/* can't use bcmenet_sw_mdio_write directly as we need to sort out
2422 * external/internal status of the given phy_id first.
2423 */
2424static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2425                                      int location,
2426                                      int val)
2427{
2428        struct bcm_enet_priv *priv;
2429
2430        priv = netdev_priv(dev);
2431        bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2432                              phy_id, location, val);
2433}
2434
2435static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2436{
2437        struct mii_if_info mii;
2438
2439        mii.dev = dev;
2440        mii.mdio_read = bcm_enetsw_mii_mdio_read;
2441        mii.mdio_write = bcm_enetsw_mii_mdio_write;
2442        mii.phy_id = 0;
2443        mii.phy_id_mask = 0x3f;
2444        mii.reg_num_mask = 0x1f;
2445        return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2447}
2448
2449static const struct net_device_ops bcm_enetsw_ops = {
2450        .ndo_open               = bcm_enetsw_open,
2451        .ndo_stop               = bcm_enetsw_stop,
2452        .ndo_start_xmit         = bcm_enet_start_xmit,
2453        .ndo_change_mtu         = bcm_enet_change_mtu,
2454        .ndo_eth_ioctl          = bcm_enetsw_ioctl,
2455};
2456
2458static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2459        { "rx_packets", DEV_STAT(rx_packets), -1 },
2460        { "tx_packets", DEV_STAT(tx_packets), -1 },
2461        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2462        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2463        { "rx_errors", DEV_STAT(rx_errors), -1 },
2464        { "tx_errors", DEV_STAT(tx_errors), -1 },
2465        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2466        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2467
2468        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2469        { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2470        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2471        { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2472        { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2473        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2474        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2475        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2476        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2477        { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2478          ETHSW_MIB_RX_1024_1522 },
2479        { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2480          ETHSW_MIB_RX_1523_2047 },
2481        { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2482          ETHSW_MIB_RX_2048_4095 },
2483        { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2484          ETHSW_MIB_RX_4096_8191 },
2485        { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2486          ETHSW_MIB_RX_8192_9728 },
2487        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2488        { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2489        { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2490        { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2491        { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2492
2493        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2494        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2495        { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2496        { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2497        { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2498        { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2500};
2501
2502#define BCM_ENETSW_STATS_LEN	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2504
2505static void bcm_enetsw_get_strings(struct net_device *netdev,
2506                                   u32 stringset, u8 *data)
2507{
2508        int i;
2509
2510        switch (stringset) {
2511        case ETH_SS_STATS:
2512                for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2513                        memcpy(data + i * ETH_GSTRING_LEN,
2514                               bcm_enetsw_gstrings_stats[i].stat_string,
2515                               ETH_GSTRING_LEN);
2516                }
2517                break;
2518        }
2519}
2520
2521static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2522                                     int string_set)
2523{
2524        switch (string_set) {
2525        case ETH_SS_STATS:
2526                return BCM_ENETSW_STATS_LEN;
2527        default:
2528                return -EINVAL;
2529        }
2530}
2531
2532static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2533                                   struct ethtool_drvinfo *drvinfo)
2534{
2535        strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2536        strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2537}
2538
2539static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2540                                         struct ethtool_stats *stats,
2541                                         u64 *data)
2542{
2543        struct bcm_enet_priv *priv;
2544        int i;
2545
2546        priv = netdev_priv(netdev);
2547
2548        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2549                const struct bcm_enet_stats *s;
2550                u32 lo, hi;
2551                char *p;
2552                int reg;
2553
2554                s = &bcm_enetsw_gstrings_stats[i];
2555
2556                reg = s->mib_reg;
2557                if (reg == -1)
2558                        continue;
2559
2560                lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2561                p = (char *)priv + s->stat_offset;
2562
2563                if (s->sizeof_stat == sizeof(u64)) {
2564                        hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2565                        *(u64 *)p = ((u64)hi << 32 | lo);
2566                } else {
2567                        *(u32 *)p = lo;
2568                }
2569        }
2570
2571        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2572                const struct bcm_enet_stats *s;
2573                char *p;
2574
2575                s = &bcm_enetsw_gstrings_stats[i];
2576
2577                if (s->mib_reg == -1)
2578                        p = (char *)&netdev->stats + s->stat_offset;
2579                else
2580                        p = (char *)priv + s->stat_offset;
2581
2582                data[i] = (s->sizeof_stat == sizeof(u64)) ?
2583                        *(u64 *)p : *(u32 *)p;
2584        }
2585}
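
/* the two passes above are intentional: the first snapshots only the
 * MIB-backed counters from hardware into priv (64-bit counters are
 * read as two consecutive 32-bit registers, high word at reg + 1),
 * the second copies every entry, including the software netdev
 * stats, into the ethtool data array
 */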
2586
2587static void
2588bcm_enetsw_get_ringparam(struct net_device *dev,
2589                         struct ethtool_ringparam *ering,
2590                         struct kernel_ethtool_ringparam *kernel_ering,
2591                         struct netlink_ext_ack *extack)
2592{
2593        struct bcm_enet_priv *priv;
2594
2595        priv = netdev_priv(dev);
2596
2597        /* rx/tx ring is actually only limited by memory */
2598        ering->rx_max_pending = 8192;
2599        ering->tx_max_pending = 8192;
2600        ering->rx_mini_max_pending = 0;
2601        ering->rx_jumbo_max_pending = 0;
2602        ering->rx_pending = priv->rx_ring_size;
2603        ering->tx_pending = priv->tx_ring_size;
2604}
2605
2606static int
2607bcm_enetsw_set_ringparam(struct net_device *dev,
2608                         struct ethtool_ringparam *ering,
2609                         struct kernel_ethtool_ringparam *kernel_ering,
2610                         struct netlink_ext_ack *extack)
2611{
2612        struct bcm_enet_priv *priv;
2613        int was_running;
2614
2615        priv = netdev_priv(dev);
2616
2617        was_running = 0;
2618        if (netif_running(dev)) {
2619                bcm_enetsw_stop(dev);
2620                was_running = 1;
2621        }
2622
2623        priv->rx_ring_size = ering->rx_pending;
2624        priv->tx_ring_size = ering->tx_pending;
2625
2626        if (was_running) {
2627                int err;
2628
2629                err = bcm_enetsw_open(dev);
2630                if (err)
2631                        dev_close(dev);
2632        }
2633        return 0;
2634}
2635
2636static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2637        .get_strings            = bcm_enetsw_get_strings,
2638        .get_sset_count         = bcm_enetsw_get_sset_count,
2639        .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2640        .get_drvinfo            = bcm_enetsw_get_drvinfo,
2641        .get_ringparam          = bcm_enetsw_get_ringparam,
2642        .set_ringparam          = bcm_enetsw_set_ringparam,
2643};
2644
2645/* allocate netdevice, request register memory and register device. */
2646static int bcm_enetsw_probe(struct platform_device *pdev)
2647{
2648        struct bcm_enet_priv *priv;
2649        struct net_device *dev;
2650        struct bcm63xx_enetsw_platform_data *pd;
2651        struct resource *res_mem;
2652        int ret, irq_rx, irq_tx;
2653
2654        if (!bcm_enet_shared_base[0])
2655                return -EPROBE_DEFER;
2656
2657        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2658        irq_rx = platform_get_irq(pdev, 0);
2659        irq_tx = platform_get_irq(pdev, 1);
2660        if (!res_mem || irq_rx < 0)
2661                return -ENODEV;
2662
2663        dev = alloc_etherdev(sizeof(*priv));
2664        if (!dev)
2665                return -ENOMEM;
2666        priv = netdev_priv(dev);
2667
2668        /* initialize default and fetch platform data */
2669        priv->enet_is_sw = true;
2670        priv->irq_rx = irq_rx;
2671        priv->irq_tx = irq_tx;
2672        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2673        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2674        priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2675        priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
2676
2677        pd = dev_get_platdata(&pdev->dev);
2678        if (pd) {
2679                eth_hw_addr_set(dev, pd->mac_addr);
2680                memcpy(priv->used_ports, pd->used_ports,
2681                       sizeof(pd->used_ports));
2682                priv->num_ports = pd->num_ports;
2683                priv->dma_has_sram = pd->dma_has_sram;
2684                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2685                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2686                priv->dma_chan_width = pd->dma_chan_width;
2687        }
2688
2689        ret = bcm_enet_change_mtu(dev, dev->mtu);
2690        if (ret)
2691                goto out;
2692
2693        priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2694        if (IS_ERR(priv->base)) {
2695                ret = PTR_ERR(priv->base);
2696                goto out;
2697        }
2698
2699        priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2700        if (IS_ERR(priv->mac_clk)) {
2701                ret = PTR_ERR(priv->mac_clk);
2702                goto out;
2703        }
2704        ret = clk_prepare_enable(priv->mac_clk);
2705        if (ret)
2706                goto out;
2707
2708        priv->rx_chan = 0;
2709        priv->tx_chan = 1;
2710        spin_lock_init(&priv->rx_lock);
2711
2712        /* init rx timeout (used for oom) */
2713        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2714
2715        /* register netdevice */
2716        dev->netdev_ops = &bcm_enetsw_ops;
2717        netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
2718        dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2719        SET_NETDEV_DEV(dev, &pdev->dev);
2720
2721        spin_lock_init(&priv->enetsw_mdio_lock);
2722
2723        ret = register_netdev(dev);
2724        if (ret)
2725                goto out_disable_clk;
2726
2727        netif_carrier_off(dev);
2728        platform_set_drvdata(pdev, dev);
2729        priv->pdev = pdev;
2730        priv->net_dev = dev;
2731
2732        return 0;
2733
2734out_disable_clk:
2735        clk_disable_unprepare(priv->mac_clk);
2736out:
2737        free_netdev(dev);
2738        return ret;
2739}
2740
2742/* exit func, stops hardware and unregisters netdevice */
2743static int bcm_enetsw_remove(struct platform_device *pdev)
2744{
2745        struct bcm_enet_priv *priv;
2746        struct net_device *dev;
2747
2748        /* stop netdevice */
2749        dev = platform_get_drvdata(pdev);
2750        priv = netdev_priv(dev);
2751        unregister_netdev(dev);
2752
2753        clk_disable_unprepare(priv->mac_clk);
2754
2755        free_netdev(dev);
2756        return 0;
2757}
2758
2759struct platform_driver bcm63xx_enetsw_driver = {
2760        .probe  = bcm_enetsw_probe,
2761        .remove = bcm_enetsw_remove,
2762        .driver = {
2763                .name   = "bcm63xx_enetsw",
2764                .owner  = THIS_MODULE,
2765        },
2766};
2767
2768/* reserve & remap memory space shared between all macs */
2769static int bcm_enet_shared_probe(struct platform_device *pdev)
2770{
2771        void __iomem *p[3];
2772        unsigned int i;
2773
2774        memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2775
2776        for (i = 0; i < 3; i++) {
2777                p[i] = devm_platform_ioremap_resource(pdev, i);
2778                if (IS_ERR(p[i]))
2779                        return PTR_ERR(p[i]);
2780        }
2781
2782        memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2783
2784        return 0;
2785}
2786
2787static int bcm_enet_shared_remove(struct platform_device *pdev)
2788{
2789        return 0;
2790}
2791
2792/* this "shared" driver is needed because both macs share a single
2793 * address space
2794 */
2795struct platform_driver bcm63xx_enet_shared_driver = {
2796        .probe  = bcm_enet_shared_probe,
2797        .remove = bcm_enet_shared_remove,
2798        .driver = {
2799                .name   = "bcm63xx_enet_shared",
2800                .owner  = THIS_MODULE,
2801        },
2802};
2803
2804static struct platform_driver * const drivers[] = {
2805        &bcm63xx_enet_shared_driver,
2806        &bcm63xx_enet_driver,
2807        &bcm63xx_enetsw_driver,
2808};
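
/* the shared driver is registered first so the common DMA register
 * windows are mapped before either mac probes; both mac probe
 * functions also return -EPROBE_DEFER until then, so the ordering is
 * an optimization rather than a correctness requirement
 */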
2809
2810/* entry point */
2811static int __init bcm_enet_init(void)
2812{
2813        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2814}
2815
2816static void __exit bcm_enet_exit(void)
2817{
2818        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2819}
2820
2822module_init(bcm_enet_init);
2823module_exit(bcm_enet_exit);
2824
2825MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2826MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2827MODULE_LICENSE("GPL");
2828