linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c
   1/*
   2 * Driver for BCM963xx builtin Ethernet mac
   3 *
   4 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
  22#include <linux/module.h>
  23#include <linux/clk.h>
  24#include <linux/etherdevice.h>
  25#include <linux/slab.h>
  26#include <linux/delay.h>
  27#include <linux/ethtool.h>
  28#include <linux/crc32.h>
  29#include <linux/err.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/platform_device.h>
  32#include <linux/if_vlan.h>
  33
  34#include <bcm63xx_dev_enet.h>
  35#include "bcm63xx_enet.h"
  36
  37static char bcm_enet_driver_name[] = "bcm63xx_enet";
  38
  39static int copybreak __read_mostly = 128;
  40module_param(copybreak, int, 0);
  41MODULE_PARM_DESC(copybreak, "Receive copy threshold");
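/*
 * Frames shorter than copybreak are copied into a freshly allocated skb
 * in bcm_enet_receive_queue(), so the original rx buffer can stay DMA
 * mapped and be rearmed immediately; longer frames are passed up as-is
 * and their buffer is unmapped and replaced on the next refill.  With
 * perm 0 the parameter is load-time only, e.g.:
 *
 *   modprobe bcm63xx_enet copybreak=256
 */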
  42
  43/* io register space shared between all devices */
  44static void __iomem *bcm_enet_shared_base[3];
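/*
 * The three shared windows map as follows (see the helpers below):
 * [0] is the global DMA controller block accessed by enet_dma_*,
 * [1] the per-channel DMA config block accessed by enet_dmac_* (offset
 * translated through bcm63xx_enetdmacreg() and scaled by
 * dma_chan_width), and [2] the per-channel state RAM block accessed by
 * enet_dmas_*, which this driver only touches when dma_has_sram is set.
 */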
  45
  46/*
  47 * io helpers to access mac registers
  48 */
  49static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
  50{
  51        return bcm_readl(priv->base + off);
  52}
  53
  54static inline void enet_writel(struct bcm_enet_priv *priv,
  55                               u32 val, u32 off)
  56{
  57        bcm_writel(val, priv->base + off);
  58}
  59
  60/*
  61 * io helpers to access switch registers
  62 */
  63static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
  64{
  65        return bcm_readl(priv->base + off);
  66}
  67
  68static inline void enetsw_writel(struct bcm_enet_priv *priv,
  69                                 u32 val, u32 off)
  70{
  71        bcm_writel(val, priv->base + off);
  72}
  73
  74static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
  75{
  76        return bcm_readw(priv->base + off);
  77}
  78
  79static inline void enetsw_writew(struct bcm_enet_priv *priv,
  80                                 u16 val, u32 off)
  81{
  82        bcm_writew(val, priv->base + off);
  83}
  84
  85static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
  86{
  87        return bcm_readb(priv->base + off);
  88}
  89
  90static inline void enetsw_writeb(struct bcm_enet_priv *priv,
  91                                 u8 val, u32 off)
  92{
  93        bcm_writeb(val, priv->base + off);
  94}
  95
  96
  97/* io helpers to access shared registers */
  98static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
  99{
 100        return bcm_readl(bcm_enet_shared_base[0] + off);
 101}
 102
 103static inline void enet_dma_writel(struct bcm_enet_priv *priv,
 104                                       u32 val, u32 off)
 105{
 106        bcm_writel(val, bcm_enet_shared_base[0] + off);
 107}
 108
 109static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
 110{
 111        return bcm_readl(bcm_enet_shared_base[1] +
 112                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
 113}
 114
 115static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
 116                                       u32 val, u32 off, int chan)
 117{
 118        bcm_writel(val, bcm_enet_shared_base[1] +
 119                bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
 120}
 121
 122static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
 123{
 124        return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
 125}
 126
 127static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
 128                                       u32 val, u32 off, int chan)
 129{
 130        bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
 131}
 132
 133/*
 134 * write given data into mii register and wait for transfer to end
 135 * with timeout (average measured transfer time is 25us)
 136 */
 137static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
 138{
 139        int limit;
 140
 141        /* make sure mii interrupt status is cleared */
 142        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
 143
 144        enet_writel(priv, data, ENET_MIIDATA_REG);
 145        wmb();
 146
 147        /* busy wait on mii interrupt bit, with timeout */
 148        limit = 1000;
 149        do {
 150                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
 151                        break;
 152                udelay(1);
 153        } while (limit-- > 0);
 154
 155        return (limit < 0) ? 1 : 0;
 156}
 157
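/*
 * do_mdio_op() returns 0 on success and 1 on timeout.  The read/write
 * helpers below build a single command word for ENET_MIIDATA_REG: PHY
 * address, register number, the fixed 0x2 turnaround pattern and a
 * read or write opcode (plus the 16-bit payload for writes).  Read
 * results come back in the low 16 bits of the same register.
 */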
 158/*
 159 * MII internal read callback
 160 */
 161static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
 162                              int regnum)
 163{
 164        u32 tmp, val;
 165
 166        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
 167        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
 168        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
 169        tmp |= ENET_MIIDATA_OP_READ_MASK;
 170
 171        if (do_mdio_op(priv, tmp))
 172                return -1;
 173
 174        val = enet_readl(priv, ENET_MIIDATA_REG);
 175        val &= 0xffff;
 176        return val;
 177}
 178
 179/*
 180 * MII internal write callback
 181 */
 182static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
 183                               int regnum, u16 value)
 184{
 185        u32 tmp;
 186
 187        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
 188        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
 189        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
 190        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
 191        tmp |= ENET_MIIDATA_OP_WRITE_MASK;
 192
 193        (void)do_mdio_op(priv, tmp);
 194        return 0;
 195}
 196
 197/*
 198 * MII read callback from phylib
 199 */
 200static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
 201                                     int regnum)
 202{
 203        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
 204}
 205
 206/*
 207 * MII write callback from phylib
 208 */
 209static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
 210                                      int regnum, u16 value)
 211{
 212        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
 213}
 214
 215/*
 216 * MII read callback from mii core
 217 */
 218static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
 219                                  int regnum)
 220{
 221        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
 222}
 223
 224/*
 225 * MII write callback from mii core
 226 */
 227static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
 228                                    int regnum, int value)
 229{
 230        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
 231}
 232
 233/*
 234 * refill rx queue
 235 */
 236static int bcm_enet_refill_rx(struct net_device *dev)
 237{
 238        struct bcm_enet_priv *priv;
 239
 240        priv = netdev_priv(dev);
 241
 242        while (priv->rx_desc_count < priv->rx_ring_size) {
 243                struct bcm_enet_desc *desc;
 244                struct sk_buff *skb;
 245                dma_addr_t p;
 246                int desc_idx;
 247                u32 len_stat;
 248
 249                desc_idx = priv->rx_dirty_desc;
 250                desc = &priv->rx_desc_cpu[desc_idx];
 251
 252                if (!priv->rx_skb[desc_idx]) {
 253                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
 254                        if (!skb)
 255                                break;
 256                        priv->rx_skb[desc_idx] = skb;
 257                        p = dma_map_single(&priv->pdev->dev, skb->data,
 258                                           priv->rx_skb_size,
 259                                           DMA_FROM_DEVICE);
 260                        desc->address = p;
 261                }
 262
 263                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 264                len_stat |= DMADESC_OWNER_MASK;
 265                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
 266                        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 267                        priv->rx_dirty_desc = 0;
 268                } else {
 269                        priv->rx_dirty_desc++;
 270                }
 271                wmb();
 272                desc->len_stat = len_stat;
 273
 274                priv->rx_desc_count++;
 275
 276                /* tell dma engine we allocated one buffer */
 277                if (priv->dma_has_sram)
 278                        enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
 279                else
 280                        enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
 281        }
 282
 283        /* If rx ring is still empty, set a timer to try allocating
 284         * again at a later time. */
 285        if (priv->rx_desc_count == 0 && netif_running(dev)) {
 286                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
 287                priv->rx_timeout.expires = jiffies + HZ;
 288                add_timer(&priv->rx_timeout);
 289        }
 290
 291        return 0;
 292}
 293
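/*
 * Ownership convention used by the rx ring: bcm_enet_refill_rx() sets
 * DMADESC_OWNER_MASK to hand a buffer to the hardware, and
 * bcm_enet_receive_queue() only consumes a descriptor once the DMA
 * engine has cleared that bit again.  rx_dirty_desc tracks the next
 * slot to (re)arm, rx_curr_desc the next slot to reap, and
 * rx_desc_count the number of descriptors currently armed.
 */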
 294/*
 295 * timer callback to defer rx queue refill in case we are OOM
 296 */
 297static void bcm_enet_refill_rx_timer(struct timer_list *t)
 298{
 299        struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
 300        struct net_device *dev = priv->net_dev;
 301
 302        spin_lock(&priv->rx_lock);
 303        bcm_enet_refill_rx(dev);
 304        spin_unlock(&priv->rx_lock);
 305}
 306
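/*
 * Locking note: the deferred refill above and the NAPI receive path
 * below both run under rx_lock, so the retry timer cannot race
 * bcm_enet_receive_queue() refilling the ring from softirq context.
 */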
 307/*
 308 * extract packet from rx queue
 309 */
 310static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 311{
 312        struct bcm_enet_priv *priv;
 313        struct device *kdev;
 314        int processed;
 315
 316        priv = netdev_priv(dev);
 317        kdev = &priv->pdev->dev;
 318        processed = 0;
 319
 320        /* don't scan the ring further than the number of refilled
 321         * descriptors */
 322        if (budget > priv->rx_desc_count)
 323                budget = priv->rx_desc_count;
 324
 325        do {
 326                struct bcm_enet_desc *desc;
 327                struct sk_buff *skb;
 328                int desc_idx;
 329                u32 len_stat;
 330                unsigned int len;
 331
 332                desc_idx = priv->rx_curr_desc;
 333                desc = &priv->rx_desc_cpu[desc_idx];
 334
 335                /* make sure we actually read the descriptor status on
 336                 * each iteration */
 337                rmb();
 338
 339                len_stat = desc->len_stat;
 340
 341                /* break if dma ownership belongs to hw */
 342                if (len_stat & DMADESC_OWNER_MASK)
 343                        break;
 344
 345                processed++;
 346                priv->rx_curr_desc++;
 347                if (priv->rx_curr_desc == priv->rx_ring_size)
 348                        priv->rx_curr_desc = 0;
 349                priv->rx_desc_count--;
 350
 351                /* if the packet does not have both the start of packet
 352                 * and end of packet flags set, just recycle it */
 353                if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
 354                        (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
 355                        dev->stats.rx_dropped++;
 356                        continue;
 357                }
 358
 359                /* recycle packet if it's marked as bad */
 360                if (!priv->enet_is_sw &&
 361                    unlikely(len_stat & DMADESC_ERR_MASK)) {
 362                        dev->stats.rx_errors++;
 363
 364                        if (len_stat & DMADESC_OVSIZE_MASK)
 365                                dev->stats.rx_length_errors++;
 366                        if (len_stat & DMADESC_CRC_MASK)
 367                                dev->stats.rx_crc_errors++;
 368                        if (len_stat & DMADESC_UNDER_MASK)
 369                                dev->stats.rx_frame_errors++;
 370                        if (len_stat & DMADESC_OV_MASK)
 371                                dev->stats.rx_fifo_errors++;
 372                        continue;
 373                }
 374
 375                /* valid packet */
 376                skb = priv->rx_skb[desc_idx];
 377                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
 378                /* don't include FCS */
 379                len -= 4;
 380
 381                if (len < copybreak) {
 382                        struct sk_buff *nskb;
 383
 384                        nskb = napi_alloc_skb(&priv->napi, len);
 385                        if (!nskb) {
 386                                /* forget packet, just rearm desc */
 387                                dev->stats.rx_dropped++;
 388                                continue;
 389                        }
 390
 391                        dma_sync_single_for_cpu(kdev, desc->address,
 392                                                len, DMA_FROM_DEVICE);
 393                        memcpy(nskb->data, skb->data, len);
 394                        dma_sync_single_for_device(kdev, desc->address,
 395                                                   len, DMA_FROM_DEVICE);
 396                        skb = nskb;
 397                } else {
 398                        dma_unmap_single(&priv->pdev->dev, desc->address,
 399                                         priv->rx_skb_size, DMA_FROM_DEVICE);
 400                        priv->rx_skb[desc_idx] = NULL;
 401                }
 402
 403                skb_put(skb, len);
 404                skb->protocol = eth_type_trans(skb, dev);
 405                dev->stats.rx_packets++;
 406                dev->stats.rx_bytes += len;
 407                netif_receive_skb(skb);
 408
 409        } while (--budget > 0);
 410
 411        if (processed || !priv->rx_desc_count) {
 412                bcm_enet_refill_rx(dev);
 413
 414                /* kick rx dma */
 415                enet_dmac_writel(priv, priv->dma_chan_en_mask,
 416                                         ENETDMAC_CHANCFG, priv->rx_chan);
 417        }
 418
 419        return processed;
 420}
 421
 422
 423/*
 424 * try to or force reclaim of transmitted buffers
 425 */
 426static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
 427{
 428        struct bcm_enet_priv *priv;
 429        int released;
 430
 431        priv = netdev_priv(dev);
 432        released = 0;
 433
 434        while (priv->tx_desc_count < priv->tx_ring_size) {
 435                struct bcm_enet_desc *desc;
 436                struct sk_buff *skb;
 437
 438                /* We run in a bh and fight against start_xmit, which
 439                 * is called with bh disabled  */
 440                spin_lock(&priv->tx_lock);
 441
 442                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
 443
 444                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
 445                        spin_unlock(&priv->tx_lock);
 446                        break;
 447                }
 448
 449                /* ensure other fields of the descriptor were not read
 450                 * before we checked ownership */
 451                rmb();
 452
 453                skb = priv->tx_skb[priv->tx_dirty_desc];
 454                priv->tx_skb[priv->tx_dirty_desc] = NULL;
 455                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
 456                                 DMA_TO_DEVICE);
 457
 458                priv->tx_dirty_desc++;
 459                if (priv->tx_dirty_desc == priv->tx_ring_size)
 460                        priv->tx_dirty_desc = 0;
 461                priv->tx_desc_count++;
 462
 463                spin_unlock(&priv->tx_lock);
 464
 465                if (desc->len_stat & DMADESC_UNDER_MASK)
 466                        dev->stats.tx_errors++;
 467
 468                dev_kfree_skb(skb);
 469                released++;
 470        }
 471
 472        if (netif_queue_stopped(dev) && released)
 473                netif_wake_queue(dev);
 474
 475        return released;
 476}
 477
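/*
 * Note on tx accounting: tx_desc_count holds the number of *free*
 * descriptors (it starts at tx_ring_size in bcm_enet_open(), is
 * decremented by start_xmit and incremented here), which is why the
 * reclaim loop above runs while it is still below tx_ring_size.
 */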
 478/*
 479 * poll func, called by network core
 480 */
 481static int bcm_enet_poll(struct napi_struct *napi, int budget)
 482{
 483        struct bcm_enet_priv *priv;
 484        struct net_device *dev;
 485        int rx_work_done;
 486
 487        priv = container_of(napi, struct bcm_enet_priv, napi);
 488        dev = priv->net_dev;
 489
 490        /* ack interrupts */
 491        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 492                         ENETDMAC_IR, priv->rx_chan);
 493        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 494                         ENETDMAC_IR, priv->tx_chan);
 495
 496        /* reclaim sent skb */
 497        bcm_enet_tx_reclaim(dev, 0);
 498
 499        spin_lock(&priv->rx_lock);
 500        rx_work_done = bcm_enet_receive_queue(dev, budget);
 501        spin_unlock(&priv->rx_lock);
 502
 503        if (rx_work_done >= budget) {
 504                /* rx queue is not yet empty/clean */
 505                return rx_work_done;
 506        }
 507
 508        /* no more packets in the rx/tx queues, remove device from the
 509         * poll queue */
 510        napi_complete_done(napi, rx_work_done);
 511
 512        /* restore rx/tx interrupt */
 513        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 514                         ENETDMAC_IRMASK, priv->rx_chan);
 515        enet_dmac_writel(priv, priv->dma_chan_int_mask,
 516                         ENETDMAC_IRMASK, priv->tx_chan);
 517
 518        return rx_work_done;
 519}
 520
 521/*
 522 * mac interrupt handler
 523 */
 524static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
 525{
 526        struct net_device *dev;
 527        struct bcm_enet_priv *priv;
 528        u32 stat;
 529
 530        dev = dev_id;
 531        priv = netdev_priv(dev);
 532
 533        stat = enet_readl(priv, ENET_IR_REG);
 534        if (!(stat & ENET_IR_MIB))
 535                return IRQ_NONE;
 536
 537        /* clear & mask interrupt */
 538        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 539        enet_writel(priv, 0, ENET_IRMASK_REG);
 540
 541        /* read mib registers in workqueue */
 542        schedule_work(&priv->mib_update_task);
 543
 544        return IRQ_HANDLED;
 545}
 546
 547/*
 548 * rx/tx dma interrupt handler
 549 */
 550static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
 551{
 552        struct net_device *dev;
 553        struct bcm_enet_priv *priv;
 554
 555        dev = dev_id;
 556        priv = netdev_priv(dev);
 557
 558        /* mask rx/tx interrupts */
 559        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
 560        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 561
 562        napi_schedule(&priv->napi);
 563
 564        return IRQ_HANDLED;
 565}
 566
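/*
 * Interrupt/NAPI handoff: the DMA ISR above masks both channel
 * interrupts and schedules NAPI; bcm_enet_poll() acks the pending
 * status and re-enables the masks only once it has drained the rings
 * within its budget.
 */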
 567/*
 568 * tx request callback
 569 */
 570static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 571{
 572        struct bcm_enet_priv *priv;
 573        struct bcm_enet_desc *desc;
 574        u32 len_stat;
 575        int ret;
 576
 577        priv = netdev_priv(dev);
 578
 579        /* lock against tx reclaim */
 580        spin_lock(&priv->tx_lock);
 581
 582        /* make sure the tx hw queue is not full; this should not happen
 583         * since we stop the queue before that is the case */
 584        if (unlikely(!priv->tx_desc_count)) {
 585                netif_stop_queue(dev);
 586                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
 587                        "available?\n");
 588                ret = NETDEV_TX_BUSY;
 589                goto out_unlock;
 590        }
 591
 592        /* pad small packets sent on a switch device */
 593        if (priv->enet_is_sw && skb->len < 64) {
 594                int needed = 64 - skb->len;
 595                char *data;
 596
 597                if (unlikely(skb_tailroom(skb) < needed)) {
 598                        struct sk_buff *nskb;
 599
 600                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
 601                        if (!nskb) {
 602                                ret = NETDEV_TX_BUSY;
 603                                goto out_unlock;
 604                        }
 605                        dev_kfree_skb(skb);
 606                        skb = nskb;
 607                }
 608                data = skb_put_zero(skb, needed);
 609        }
 610
 611        /* point to the next available desc */
 612        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
 613        priv->tx_skb[priv->tx_curr_desc] = skb;
 614
 615        /* fill descriptor */
 616        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
 617                                       DMA_TO_DEVICE);
 618
 619        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
 620        len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
 621                DMADESC_APPEND_CRC |
 622                DMADESC_OWNER_MASK;
 623
 624        priv->tx_curr_desc++;
 625        if (priv->tx_curr_desc == priv->tx_ring_size) {
 626                priv->tx_curr_desc = 0;
 627                len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
 628        }
 629        priv->tx_desc_count--;
 630
 631        /* dma might already be polling, make sure we update desc
 632         * fields in the correct order */
 633        wmb();
 634        desc->len_stat = len_stat;
 635        wmb();
 636
 637        /* kick tx dma */
 638        enet_dmac_writel(priv, priv->dma_chan_en_mask,
 639                                 ENETDMAC_CHANCFG, priv->tx_chan);
 640
 641        /* stop queue if no more desc available */
 642        if (!priv->tx_desc_count)
 643                netif_stop_queue(dev);
 644
 645        dev->stats.tx_bytes += skb->len;
 646        dev->stats.tx_packets++;
 647        ret = NETDEV_TX_OK;
 648
 649out_unlock:
 650        spin_unlock(&priv->tx_lock);
 651        return ret;
 652}
 653
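/*
 * Reading note on the two barriers in start_xmit above: the first
 * wmb() orders the descriptor payload (address, length) before the
 * len_stat word that transfers ownership to the hardware, the second
 * orders len_stat before the ENETDMAC_CHANCFG doorbell that kicks the
 * channel.
 */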
 654/*
 655 * Change the interface's mac address.
 656 */
 657static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
 658{
 659        struct bcm_enet_priv *priv;
 660        struct sockaddr *addr = p;
 661        u32 val;
 662
 663        priv = netdev_priv(dev);
 664        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 665
 666        /* use perfect match register 0 to store my mac address */
 667        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
 668                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
 669        enet_writel(priv, val, ENET_PML_REG(0));
 670
 671        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 672        val |= ENET_PMH_DATAVALID_MASK;
 673        enet_writel(priv, val, ENET_PMH_REG(0));
 674
 675        return 0;
 676}
 677
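/*
 * Perfect match register layout as used above and in
 * bcm_enet_set_multicast_list() below: ENET_PML_REG(n) holds the low
 * four bytes of the address, ENET_PMH_REG(n) the top two bytes plus
 * ENET_PMH_DATAVALID_MASK.  Entry 0 is the interface address, entries
 * 1-3 hold up to three multicast addresses.
 */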
 678/*
 679 * Change rx mode (promiscuous/allmulti) and update multicast list
 680 */
 681static void bcm_enet_set_multicast_list(struct net_device *dev)
 682{
 683        struct bcm_enet_priv *priv;
 684        struct netdev_hw_addr *ha;
 685        u32 val;
 686        int i;
 687
 688        priv = netdev_priv(dev);
 689
 690        val = enet_readl(priv, ENET_RXCFG_REG);
 691
 692        if (dev->flags & IFF_PROMISC)
 693                val |= ENET_RXCFG_PROMISC_MASK;
 694        else
 695                val &= ~ENET_RXCFG_PROMISC_MASK;
 696
 697        /* only 3 perfect match registers left, first one is used for
 698         * own mac address */
 699        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
 700                val |= ENET_RXCFG_ALLMCAST_MASK;
 701        else
 702                val &= ~ENET_RXCFG_ALLMCAST_MASK;
 703
 704        /* no need to set perfect match registers if we catch all
 705         * multicast */
 706        if (val & ENET_RXCFG_ALLMCAST_MASK) {
 707                enet_writel(priv, val, ENET_RXCFG_REG);
 708                return;
 709        }
 710
 711        i = 0;
 712        netdev_for_each_mc_addr(ha, dev) {
 713                u8 *dmi_addr;
 714                u32 tmp;
 715
 716                if (i == 3)
 717                        break;
 718                /* update perfect match registers */
 719                dmi_addr = ha->addr;
 720                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
 721                        (dmi_addr[4] << 8) | dmi_addr[5];
 722                enet_writel(priv, tmp, ENET_PML_REG(i + 1));
 723
 724                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
 725                tmp |= ENET_PMH_DATAVALID_MASK;
 726                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
 727        }
 728
 729        for (; i < 3; i++) {
 730                enet_writel(priv, 0, ENET_PML_REG(i + 1));
 731                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
 732        }
 733
 734        enet_writel(priv, val, ENET_RXCFG_REG);
 735}
 736
 737/*
 738 * set mac duplex parameters
 739 */
 740static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
 741{
 742        u32 val;
 743
 744        val = enet_readl(priv, ENET_TXCTL_REG);
 745        if (fullduplex)
 746                val |= ENET_TXCTL_FD_MASK;
 747        else
 748                val &= ~ENET_TXCTL_FD_MASK;
 749        enet_writel(priv, val, ENET_TXCTL_REG);
 750}
 751
 752/*
 753 * set mac flow control parameters
 754 */
 755static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
 756{
 757        u32 val;
 758
 759        /* rx flow control (pause frame handling) */
 760        val = enet_readl(priv, ENET_RXCFG_REG);
 761        if (rx_en)
 762                val |= ENET_RXCFG_ENFLOW_MASK;
 763        else
 764                val &= ~ENET_RXCFG_ENFLOW_MASK;
 765        enet_writel(priv, val, ENET_RXCFG_REG);
 766
 767        if (!priv->dma_has_sram)
 768                return;
 769
 770        /* tx flow control (pause frame generation) */
 771        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 772        if (tx_en)
 773                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
 774        else
 775                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
 776        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
 777}
 778
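/*
 * Flow control is split across two blocks: pause frame handling is an
 * rx config bit in the MAC (ENET_RXCFG_ENFLOW_MASK), while pause frame
 * generation is a per-channel enable in the shared DMA controller and
 * is only available on parts where the DMA has SRAM-backed state
 * (dma_has_sram).
 */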
 779/*
 780 * link changed callback (from phylib)
 781 */
 782static void bcm_enet_adjust_phy_link(struct net_device *dev)
 783{
 784        struct bcm_enet_priv *priv;
 785        struct phy_device *phydev;
 786        int status_changed;
 787
 788        priv = netdev_priv(dev);
 789        phydev = dev->phydev;
 790        status_changed = 0;
 791
 792        if (priv->old_link != phydev->link) {
 793                status_changed = 1;
 794                priv->old_link = phydev->link;
 795        }
 796
 797        /* reflect duplex change in mac configuration */
 798        if (phydev->link && phydev->duplex != priv->old_duplex) {
 799                bcm_enet_set_duplex(priv,
 800                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
 801                status_changed = 1;
 802                priv->old_duplex = phydev->duplex;
 803        }
 804
 805        /* enable flow control if remote advertises it (trust phylib to
 806         * check that duplex is full) */
 807        if (phydev->link && phydev->pause != priv->old_pause) {
 808                int rx_pause_en, tx_pause_en;
 809
 810                if (phydev->pause) {
 811                        /* pause was advertised by lpa and us */
 812                        rx_pause_en = 1;
 813                        tx_pause_en = 1;
 814                } else if (!priv->pause_auto) {
 815                        /* pause setting overridden by user */
 816                        rx_pause_en = priv->pause_rx;
 817                        tx_pause_en = priv->pause_tx;
 818                } else {
 819                        rx_pause_en = 0;
 820                        tx_pause_en = 0;
 821                }
 822
 823                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
 824                status_changed = 1;
 825                priv->old_pause = phydev->pause;
 826        }
 827
 828        if (status_changed) {
 829                pr_info("%s: link %s", dev->name, phydev->link ?
 830                        "UP" : "DOWN");
 831                if (phydev->link)
 832                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
 833                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
 834                               phydev->pause == 1 ? "rx&tx" : "off");
 835
 836                pr_cont("\n");
 837        }
 838}
 839
 840/*
 841 * link changed callback (if phylib is not used)
 842 */
 843static void bcm_enet_adjust_link(struct net_device *dev)
 844{
 845        struct bcm_enet_priv *priv;
 846
 847        priv = netdev_priv(dev);
 848        bcm_enet_set_duplex(priv, priv->force_duplex_full);
 849        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
 850        netif_carrier_on(dev);
 851
 852        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
 853                dev->name,
 854                priv->force_speed_100 ? 100 : 10,
 855                priv->force_duplex_full ? "full" : "half",
 856                priv->pause_rx ? "rx" : "off",
 857                priv->pause_tx ? "tx" : "off");
 858}
 859
 860/*
 861 * open callback, allocate dma rings & buffers and start rx operation
 862 */
 863static int bcm_enet_open(struct net_device *dev)
 864{
 865        struct bcm_enet_priv *priv;
 866        struct sockaddr addr;
 867        struct device *kdev;
 868        struct phy_device *phydev;
 869        int i, ret;
 870        unsigned int size;
 871        char phy_id[MII_BUS_ID_SIZE + 3];
 872        void *p;
 873        u32 val;
 874
 875        priv = netdev_priv(dev);
 876        kdev = &priv->pdev->dev;
 877
 878        if (priv->has_phy) {
 879                /* connect to PHY */
 880                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
 881                         priv->mii_bus->id, priv->phy_id);
 882
 883                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
 884                                     PHY_INTERFACE_MODE_MII);
 885
 886                if (IS_ERR(phydev)) {
 887                        dev_err(kdev, "could not attach to PHY\n");
 888                        return PTR_ERR(phydev);
 889                }
 890
 891                /* mask with MAC supported features */
 892                phy_support_sym_pause(phydev);
 893                phy_set_max_speed(phydev, SPEED_100);
 894                phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
 895                                  priv->pause_auto);
 896
 897                phy_attached_info(phydev);
 898
 899                priv->old_link = 0;
 900                priv->old_duplex = -1;
 901                priv->old_pause = -1;
 902        } else {
 903                phydev = NULL;
 904        }
 905
 906        /* mask all interrupts and request them */
 907        enet_writel(priv, 0, ENET_IRMASK_REG);
 908        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
 909        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
 910
 911        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 912        if (ret)
 913                goto out_phy_disconnect;
 914
 915        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
 916                          dev->name, dev);
 917        if (ret)
 918                goto out_freeirq;
 919
 920        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
 921                          0, dev->name, dev);
 922        if (ret)
 923                goto out_freeirq_rx;
 924
 925        /* initialize perfect match registers */
 926        for (i = 0; i < 4; i++) {
 927                enet_writel(priv, 0, ENET_PML_REG(i));
 928                enet_writel(priv, 0, ENET_PMH_REG(i));
 929        }
 930
 931        /* write device mac address */
 932        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
 933        bcm_enet_set_mac_address(dev, &addr);
 934
 935        /* allocate rx dma ring */
 936        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
 937        p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 938        if (!p) {
 939                ret = -ENOMEM;
 940                goto out_freeirq_tx;
 941        }
 942
 943        priv->rx_desc_alloc_size = size;
 944        priv->rx_desc_cpu = p;
 945
 946        /* allocate tx dma ring */
 947        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
 948        p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 949        if (!p) {
 950                ret = -ENOMEM;
 951                goto out_free_rx_ring;
 952        }
 953
 954        priv->tx_desc_alloc_size = size;
 955        priv->tx_desc_cpu = p;
 956
 957        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
 958                               GFP_KERNEL);
 959        if (!priv->tx_skb) {
 960                ret = -ENOMEM;
 961                goto out_free_tx_ring;
 962        }
 963
 964        priv->tx_desc_count = priv->tx_ring_size;
 965        priv->tx_dirty_desc = 0;
 966        priv->tx_curr_desc = 0;
 967        spin_lock_init(&priv->tx_lock);
 968
 969        /* init & fill rx ring with skbs */
 970        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
 971                               GFP_KERNEL);
 972        if (!priv->rx_skb) {
 973                ret = -ENOMEM;
 974                goto out_free_tx_skb;
 975        }
 976
 977        priv->rx_desc_count = 0;
 978        priv->rx_dirty_desc = 0;
 979        priv->rx_curr_desc = 0;
 980
 981        /* initialize flow control buffer allocation */
 982        if (priv->dma_has_sram)
 983                enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 984                                ENETDMA_BUFALLOC_REG(priv->rx_chan));
 985        else
 986                enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
 987                                ENETDMAC_BUFALLOC, priv->rx_chan);
 988
 989        if (bcm_enet_refill_rx(dev)) {
 990                dev_err(kdev, "cannot allocate rx skb queue\n");
 991                ret = -ENOMEM;
 992                goto out;
 993        }
 994
 995        /* write rx & tx ring addresses */
 996        if (priv->dma_has_sram) {
 997                enet_dmas_writel(priv, priv->rx_desc_dma,
 998                                 ENETDMAS_RSTART_REG, priv->rx_chan);
 999                enet_dmas_writel(priv, priv->tx_desc_dma,
1000                         ENETDMAS_RSTART_REG, priv->tx_chan);
1001        } else {
1002                enet_dmac_writel(priv, priv->rx_desc_dma,
1003                                ENETDMAC_RSTART, priv->rx_chan);
1004                enet_dmac_writel(priv, priv->tx_desc_dma,
1005                                ENETDMAC_RSTART, priv->tx_chan);
1006        }
1007
1008        /* clear remaining state ram for rx & tx channel */
1009        if (priv->dma_has_sram) {
1010                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1011                enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1012                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1013                enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1014                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1015                enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1016        } else {
1017                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1018                enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1019        }
1020
1021        /* set max rx/tx length */
1022        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1023        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1024
1025        /* set dma maximum burst len */
1026        enet_dmac_writel(priv, priv->dma_maxburst,
1027                         ENETDMAC_MAXBURST, priv->rx_chan);
1028        enet_dmac_writel(priv, priv->dma_maxburst,
1029                         ENETDMAC_MAXBURST, priv->tx_chan);
1030
1031        /* set correct transmit fifo watermark */
1032        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1033
1034        /* set flow control low/high threshold to 1/3 / 2/3 */
1035        if (priv->dma_has_sram) {
1036                val = priv->rx_ring_size / 3;
1037                enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1038                val = (priv->rx_ring_size * 2) / 3;
1039                enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1040        } else {
1041                enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1042                enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1043                enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1044        }
1045
1046        /* all set, enable mac and interrupts, start dma engine and
1047         * kick rx dma channel */
1048        wmb();
1049        val = enet_readl(priv, ENET_CTL_REG);
1050        val |= ENET_CTL_ENABLE_MASK;
1051        enet_writel(priv, val, ENET_CTL_REG);
1052        if (priv->dma_has_sram)
1053                enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1054        enet_dmac_writel(priv, priv->dma_chan_en_mask,
1055                         ENETDMAC_CHANCFG, priv->rx_chan);
1056
1057        /* watch "mib counters about to overflow" interrupt */
1058        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1059        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1060
1061        /* watch "packet transferred" interrupt in rx and tx */
1062        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1063                         ENETDMAC_IR, priv->rx_chan);
1064        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1065                         ENETDMAC_IR, priv->tx_chan);
1066
1067        /* make sure we enable napi before rx interrupt  */
1068        napi_enable(&priv->napi);
1069
1070        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1071                         ENETDMAC_IRMASK, priv->rx_chan);
1072        enet_dmac_writel(priv, priv->dma_chan_int_mask,
1073                         ENETDMAC_IRMASK, priv->tx_chan);
1074
1075        if (phydev)
1076                phy_start(phydev);
1077        else
1078                bcm_enet_adjust_link(dev);
1079
1080        netif_start_queue(dev);
1081        return 0;
1082
1083out:
1084        for (i = 0; i < priv->rx_ring_size; i++) {
1085                struct bcm_enet_desc *desc;
1086
1087                if (!priv->rx_skb[i])
1088                        continue;
1089
1090                desc = &priv->rx_desc_cpu[i];
1091                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1092                                 DMA_FROM_DEVICE);
1093                kfree_skb(priv->rx_skb[i]);
1094        }
1095        kfree(priv->rx_skb);
1096
1097out_free_tx_skb:
1098        kfree(priv->tx_skb);
1099
1100out_free_tx_ring:
1101        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1102                          priv->tx_desc_cpu, priv->tx_desc_dma);
1103
1104out_free_rx_ring:
1105        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1106                          priv->rx_desc_cpu, priv->rx_desc_dma);
1107
1108out_freeirq_tx:
1109        free_irq(priv->irq_tx, dev);
1110
1111out_freeirq_rx:
1112        free_irq(priv->irq_rx, dev);
1113
1114out_freeirq:
1115        free_irq(dev->irq, dev);
1116
1117out_phy_disconnect:
1118        if (phydev)
1119                phy_disconnect(phydev);
1120
1121        return ret;
1122}
1123
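/*
 * The error labels above unwind in the reverse order of setup: rx skbs,
 * then the skb pointer arrays, the coherent descriptor rings, the three
 * irqs and finally the PHY connection, mirroring bcm_enet_stop() below.
 */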
1124/*
1125 * disable mac
1126 */
1127static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1128{
1129        int limit;
1130        u32 val;
1131
1132        val = enet_readl(priv, ENET_CTL_REG);
1133        val |= ENET_CTL_DISABLE_MASK;
1134        enet_writel(priv, val, ENET_CTL_REG);
1135
1136        limit = 1000;
1137        do {
1138                u32 val;
1139
1140                val = enet_readl(priv, ENET_CTL_REG);
1141                if (!(val & ENET_CTL_DISABLE_MASK))
1142                        break;
1143                udelay(1);
1144        } while (limit--);
1145}
1146
1147/*
1148 * disable dma in given channel
1149 */
1150static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1151{
1152        int limit;
1153
1154        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1155
1156        limit = 1000;
1157        do {
1158                u32 val;
1159
1160                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1161                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1162                        break;
1163                udelay(1);
1164        } while (limit--);
1165}
1166
1167/*
1168 * stop callback
1169 */
1170static int bcm_enet_stop(struct net_device *dev)
1171{
1172        struct bcm_enet_priv *priv;
1173        struct device *kdev;
1174        int i;
1175
1176        priv = netdev_priv(dev);
1177        kdev = &priv->pdev->dev;
1178
1179        netif_stop_queue(dev);
1180        napi_disable(&priv->napi);
1181        if (priv->has_phy)
1182                phy_stop(dev->phydev);
1183        del_timer_sync(&priv->rx_timeout);
1184
1185        /* mask all interrupts */
1186        enet_writel(priv, 0, ENET_IRMASK_REG);
1187        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1188        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1189
1190        /* make sure no mib update is scheduled */
1191        cancel_work_sync(&priv->mib_update_task);
1192
1193        /* disable dma & mac */
1194        bcm_enet_disable_dma(priv, priv->tx_chan);
1195        bcm_enet_disable_dma(priv, priv->rx_chan);
1196        bcm_enet_disable_mac(priv);
1197
1198        /* force reclaim of all tx buffers */
1199        bcm_enet_tx_reclaim(dev, 1);
1200
1201        /* free the rx skb ring */
1202        for (i = 0; i < priv->rx_ring_size; i++) {
1203                struct bcm_enet_desc *desc;
1204
1205                if (!priv->rx_skb[i])
1206                        continue;
1207
1208                desc = &priv->rx_desc_cpu[i];
1209                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1210                                 DMA_FROM_DEVICE);
1211                kfree_skb(priv->rx_skb[i]);
1212        }
1213
1214        /* free remaining allocated memory */
1215        kfree(priv->rx_skb);
1216        kfree(priv->tx_skb);
1217        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1218                          priv->rx_desc_cpu, priv->rx_desc_dma);
1219        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1220                          priv->tx_desc_cpu, priv->tx_desc_dma);
1221        free_irq(priv->irq_tx, dev);
1222        free_irq(priv->irq_rx, dev);
1223        free_irq(dev->irq, dev);
1224
1225        /* release phy */
1226        if (priv->has_phy)
1227                phy_disconnect(dev->phydev);
1228
1229        return 0;
1230}
1231
1232/*
1233 * ethtool callbacks
1234 */
1235struct bcm_enet_stats {
1236        char stat_string[ETH_GSTRING_LEN];
1237        int sizeof_stat;
1238        int stat_offset;
1239        int mib_reg;
1240};
1241
1242#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
1243                     offsetof(struct bcm_enet_priv, m)
1244#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
1245                     offsetof(struct net_device_stats, m)
1246
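/*
 * Each entry below expands to { name, sizeof(field), offsetof(field),
 * mib_reg }: DEV_STAT() fields live in struct net_device_stats and
 * have no MIB register (mib_reg == -1), while GEN_STAT() fields are
 * mirrored from the hardware MIB counters into struct bcm_enet_priv by
 * update_mib_counters().
 */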
1247static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1248        { "rx_packets", DEV_STAT(rx_packets), -1 },
1249        { "tx_packets", DEV_STAT(tx_packets), -1 },
1250        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1251        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1252        { "rx_errors", DEV_STAT(rx_errors), -1 },
1253        { "tx_errors", DEV_STAT(tx_errors), -1 },
1254        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1255        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1256
1257        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1258        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1259        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1260        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1261        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1262        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1263        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1264        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1265        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1266        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1267        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1268        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1269        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1270        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1271        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1272        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1273        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1274        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1275        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1276        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1277        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1278
1279        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1280        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1281        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1282        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1283        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1284        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1285        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1286        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1287        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1288        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1289        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1290        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1291        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1292        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1293        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1294        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1295        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1296        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1297        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1298        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1299        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1300        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1301
1302};
1303
1304#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)
1305
1306static const u32 unused_mib_regs[] = {
1307        ETH_MIB_TX_ALL_OCTETS,
1308        ETH_MIB_TX_ALL_PKTS,
1309        ETH_MIB_RX_ALL_OCTETS,
1310        ETH_MIB_RX_ALL_PKTS,
1311};
1312
1313
1314static void bcm_enet_get_drvinfo(struct net_device *netdev,
1315                                 struct ethtool_drvinfo *drvinfo)
1316{
1317        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1318        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1319        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1320}
1321
1322static int bcm_enet_get_sset_count(struct net_device *netdev,
1323                                        int string_set)
1324{
1325        switch (string_set) {
1326        case ETH_SS_STATS:
1327                return BCM_ENET_STATS_LEN;
1328        default:
1329                return -EINVAL;
1330        }
1331}
1332
1333static void bcm_enet_get_strings(struct net_device *netdev,
1334                                 u32 stringset, u8 *data)
1335{
1336        int i;
1337
1338        switch (stringset) {
1339        case ETH_SS_STATS:
1340                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1341                        memcpy(data + i * ETH_GSTRING_LEN,
1342                               bcm_enet_gstrings_stats[i].stat_string,
1343                               ETH_GSTRING_LEN);
1344                }
1345                break;
1346        }
1347}
1348
1349static void update_mib_counters(struct bcm_enet_priv *priv)
1350{
1351        int i;
1352
1353        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1354                const struct bcm_enet_stats *s;
1355                u32 val;
1356                char *p;
1357
1358                s = &bcm_enet_gstrings_stats[i];
1359                if (s->mib_reg == -1)
1360                        continue;
1361
1362                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1363                p = (char *)priv + s->stat_offset;
1364
1365                if (s->sizeof_stat == sizeof(u64))
1366                        *(u64 *)p += val;
1367                else
1368                        *(u32 *)p += val;
1369        }
1370
1371        /* also empty unused mib counters to make sure mib counter
1372         * overflow interrupt is cleared */
1373        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1374                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1375}
1376
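/*
 * Note: the "+=" accumulation above assumes the hardware MIB counters
 * are clear-on-read; reading the unused counters likewise discards
 * their value so the "about to overflow" interrupt condition goes
 * away.
 */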
1377static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1378{
1379        struct bcm_enet_priv *priv;
1380
1381        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1382        mutex_lock(&priv->mib_update_lock);
1383        update_mib_counters(priv);
1384        mutex_unlock(&priv->mib_update_lock);
1385
1386        /* reenable mib interrupt */
1387        if (netif_running(priv->net_dev))
1388                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1389}
1390
1391static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1392                                       struct ethtool_stats *stats,
1393                                       u64 *data)
1394{
1395        struct bcm_enet_priv *priv;
1396        int i;
1397
1398        priv = netdev_priv(netdev);
1399
1400        mutex_lock(&priv->mib_update_lock);
1401        update_mib_counters(priv);
1402
1403        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1404                const struct bcm_enet_stats *s;
1405                char *p;
1406
1407                s = &bcm_enet_gstrings_stats[i];
1408                if (s->mib_reg == -1)
1409                        p = (char *)&netdev->stats;
1410                else
1411                        p = (char *)priv;
1412                p += s->stat_offset;
1413                data[i] = (s->sizeof_stat == sizeof(u64)) ?
1414                        *(u64 *)p : *(u32 *)p;
1415        }
1416        mutex_unlock(&priv->mib_update_lock);
1417}
1418
1419static int bcm_enet_nway_reset(struct net_device *dev)
1420{
1421        struct bcm_enet_priv *priv;
1422
1423        priv = netdev_priv(dev);
1424        if (priv->has_phy)
1425                return phy_ethtool_nway_reset(dev);
1426
1427        return -EOPNOTSUPP;
1428}
1429
1430static int bcm_enet_get_link_ksettings(struct net_device *dev,
1431                                       struct ethtool_link_ksettings *cmd)
1432{
1433        struct bcm_enet_priv *priv;
1434        u32 supported, advertising;
1435
1436        priv = netdev_priv(dev);
1437
1438        if (priv->has_phy) {
1439                if (!dev->phydev)
1440                        return -ENODEV;
1441
1442                phy_ethtool_ksettings_get(dev->phydev, cmd);
1443
1444                return 0;
1445        } else {
1446                cmd->base.autoneg = 0;
1447                cmd->base.speed = (priv->force_speed_100) ?
1448                        SPEED_100 : SPEED_10;
1449                cmd->base.duplex = (priv->force_duplex_full) ?
1450                        DUPLEX_FULL : DUPLEX_HALF;
1451                supported = ADVERTISED_10baseT_Half |
1452                        ADVERTISED_10baseT_Full |
1453                        ADVERTISED_100baseT_Half |
1454                        ADVERTISED_100baseT_Full;
1455                advertising = 0;
1456                ethtool_convert_legacy_u32_to_link_mode(
1457                        cmd->link_modes.supported, supported);
1458                ethtool_convert_legacy_u32_to_link_mode(
1459                        cmd->link_modes.advertising, advertising);
1460                cmd->base.port = PORT_MII;
1461        }
1462        return 0;
1463}
1464
1465static int bcm_enet_set_link_ksettings(struct net_device *dev,
1466                                       const struct ethtool_link_ksettings *cmd)
1467{
1468        struct bcm_enet_priv *priv;
1469
1470        priv = netdev_priv(dev);
1471        if (priv->has_phy) {
1472                if (!dev->phydev)
1473                        return -ENODEV;
1474                return phy_ethtool_ksettings_set(dev->phydev, cmd);
1475        } else {
1476
1477                if (cmd->base.autoneg ||
1478                    (cmd->base.speed != SPEED_100 &&
1479                     cmd->base.speed != SPEED_10) ||
1480                    cmd->base.port != PORT_MII)
1481                        return -EINVAL;
1482
1483                priv->force_speed_100 =
1484                        (cmd->base.speed == SPEED_100) ? 1 : 0;
1485                priv->force_duplex_full =
1486                        (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
1487
1488                if (netif_running(dev))
1489                        bcm_enet_adjust_link(dev);
1490                return 0;
1491        }
1492}
1493
1494static void bcm_enet_get_ringparam(struct net_device *dev,
1495                                   struct ethtool_ringparam *ering)
1496{
1497        struct bcm_enet_priv *priv;
1498
1499        priv = netdev_priv(dev);
1500
1501        /* rx/tx ring is actually only limited by memory */
1502        ering->rx_max_pending = 8192;
1503        ering->tx_max_pending = 8192;
1504        ering->rx_pending = priv->rx_ring_size;
1505        ering->tx_pending = priv->tx_ring_size;
1506}
1507
1508static int bcm_enet_set_ringparam(struct net_device *dev,
1509                                  struct ethtool_ringparam *ering)
1510{
1511        struct bcm_enet_priv *priv;
1512        int was_running;
1513
1514        priv = netdev_priv(dev);
1515
1516        was_running = 0;
1517        if (netif_running(dev)) {
1518                bcm_enet_stop(dev);
1519                was_running = 1;
1520        }
1521
1522        priv->rx_ring_size = ering->rx_pending;
1523        priv->tx_ring_size = ering->tx_pending;
1524
1525        if (was_running) {
1526                int err;
1527
1528                err = bcm_enet_open(dev);
1529                if (err)
1530                        dev_close(dev);
1531                else
1532                        bcm_enet_set_multicast_list(dev);
1533        }
1534        return 0;
1535}
1536
1537static void bcm_enet_get_pauseparam(struct net_device *dev,
1538                                    struct ethtool_pauseparam *ecmd)
1539{
1540        struct bcm_enet_priv *priv;
1541
1542        priv = netdev_priv(dev);
1543        ecmd->autoneg = priv->pause_auto;
1544        ecmd->rx_pause = priv->pause_rx;
1545        ecmd->tx_pause = priv->pause_tx;
1546}
1547
1548static int bcm_enet_set_pauseparam(struct net_device *dev,
1549                                   struct ethtool_pauseparam *ecmd)
1550{
1551        struct bcm_enet_priv *priv;
1552
1553        priv = netdev_priv(dev);
1554
1555        if (priv->has_phy) {
1556                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1557                        /* asymmetric pause mode not supported,
1558                         * actually possible but the integrated PHY has a RO
1559                         * asym_pause bit */
1560                        return -EINVAL;
1561                }
1562        } else {
1563                /* no pause autoneg on direct mii connection */
1564                if (ecmd->autoneg)
1565                        return -EINVAL;
1566        }
1567
1568        priv->pause_auto = ecmd->autoneg;
1569        priv->pause_rx = ecmd->rx_pause;
1570        priv->pause_tx = ecmd->tx_pause;
1571
1572        return 0;
1573}
1574
1575static const struct ethtool_ops bcm_enet_ethtool_ops = {
1576        .get_strings            = bcm_enet_get_strings,
1577        .get_sset_count         = bcm_enet_get_sset_count,
1578        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1579        .nway_reset             = bcm_enet_nway_reset,
1580        .get_drvinfo            = bcm_enet_get_drvinfo,
1581        .get_link               = ethtool_op_get_link,
1582        .get_ringparam          = bcm_enet_get_ringparam,
1583        .set_ringparam          = bcm_enet_set_ringparam,
1584        .get_pauseparam         = bcm_enet_get_pauseparam,
1585        .set_pauseparam         = bcm_enet_set_pauseparam,
1586        .get_link_ksettings     = bcm_enet_get_link_ksettings,
1587        .set_link_ksettings     = bcm_enet_set_link_ksettings,
1588};
1589
1590static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1591{
1592        struct bcm_enet_priv *priv;
1593
1594        priv = netdev_priv(dev);
1595        if (priv->has_phy) {
1596                if (!dev->phydev)
1597                        return -ENODEV;
1598                return phy_mii_ioctl(dev->phydev, rq, cmd);
1599        } else {
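                    /* no phylib PHY attached: service MII ioctls through a
                     * temporary mii_if_info using the raw mdio helpers */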
1600                struct mii_if_info mii;
1601
1602                mii.dev = dev;
1603                mii.mdio_read = bcm_enet_mdio_read_mii;
1604                mii.mdio_write = bcm_enet_mdio_write_mii;
1605                mii.phy_id = 0;
1606                mii.phy_id_mask = 0x3f;
1607                mii.reg_num_mask = 0x1f;
1608                return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1609        }
1610}
1611
1612/*
1613 * adjust mtu, can't be called while device is running
1614 */
1615static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1616{
1617        struct bcm_enet_priv *priv = netdev_priv(dev);
1618        int actual_mtu = new_mtu;
1619
1620        if (netif_running(dev))
1621                return -EBUSY;
1622
1623        /* add ethernet header + vlan tag size */
1624        actual_mtu += VLAN_ETH_HLEN;
1625
1626        /*
1627         * set up the maximum size before we hit the overflow mark in
1628         * the descriptor; note that this does not prevent reception of
1629         * big frames, they will simply be split into multiple buffers
1630         * anyway
1631         */
1632        priv->hw_mtu = actual_mtu;
1633
1634        /*
1635         * align rx buffer size to dma burst len, and account for the
1636         * FCS since it is appended by the hardware
1637         */
1638        priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1639                                  priv->dma_maxburst * 4);
1640
1641        dev->mtu = new_mtu;
1642        return 0;
1643}
1644
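    /*
     * Worked example (illustrative values, not taken from this file):
     * with the standard 1500 byte MTU, actual_mtu = 1500 + VLAN_ETH_HLEN
     * (18) = 1518, so hw_mtu = 1518; assuming a dma_maxburst of 16 words,
     * rx_skb_size = ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) =
     * ALIGN(1522, 64) = 1536 bytes per rx buffer.
     */
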
1645/*
1646 * preinit hardware to allow mii operation while device is down
1647 */
1648static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1649{
1650        u32 val;
1651        int limit;
1652
1653        /* make sure mac is disabled */
1654        bcm_enet_disable_mac(priv);
1655
1656        /* soft reset mac */
1657        val = ENET_CTL_SRESET_MASK;
1658        enet_writel(priv, val, ENET_CTL_REG);
1659        wmb();
1660
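            /* wait up to ~1ms (1000 x udelay(1)) for the soft reset bit to clear */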
1661        limit = 1000;
1662        do {
1663                val = enet_readl(priv, ENET_CTL_REG);
1664                if (!(val & ENET_CTL_SRESET_MASK))
1665                        break;
1666                udelay(1);
1667        } while (limit--);
1668
1669        /* select correct mii interface */
1670        val = enet_readl(priv, ENET_CTL_REG);
1671        if (priv->use_external_mii)
1672                val |= ENET_CTL_EPHYSEL_MASK;
1673        else
1674                val &= ~ENET_CTL_EPHYSEL_MASK;
1675        enet_writel(priv, val, ENET_CTL_REG);
1676
1677        /* turn on mdc clock */
1678        enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1679                    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1680
1681        /* set mib counters to self-clear when read */
1682        val = enet_readl(priv, ENET_MIBCTL_REG);
1683        val |= ENET_MIBCTL_RDCLEAR_MASK;
1684        enet_writel(priv, val, ENET_MIBCTL_REG);
1685}
1686
1687static const struct net_device_ops bcm_enet_ops = {
1688        .ndo_open               = bcm_enet_open,
1689        .ndo_stop               = bcm_enet_stop,
1690        .ndo_start_xmit         = bcm_enet_start_xmit,
1691        .ndo_set_mac_address    = bcm_enet_set_mac_address,
1692        .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
1693        .ndo_do_ioctl           = bcm_enet_ioctl,
1694        .ndo_change_mtu         = bcm_enet_change_mtu,
1695};
1696
1697/*
1698 * allocate netdevice, request register memory and register device.
1699 */
1700static int bcm_enet_probe(struct platform_device *pdev)
1701{
1702        struct bcm_enet_priv *priv;
1703        struct net_device *dev;
1704        struct bcm63xx_enet_platform_data *pd;
1705        struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1706        struct mii_bus *bus;
1707        int i, ret;
1708
1709        if (!bcm_enet_shared_base[0])
1710                return -EPROBE_DEFER;
1711
1712        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1713        res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1714        res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1715        if (!res_irq || !res_irq_rx || !res_irq_tx)
1716                return -ENODEV;
1717
1718        ret = 0;
1719        dev = alloc_etherdev(sizeof(*priv));
1720        if (!dev)
1721                return -ENOMEM;
1722        priv = netdev_priv(dev);
1723
1724        priv->enet_is_sw = false;
1725        priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1726
1727        ret = bcm_enet_change_mtu(dev, dev->mtu);
1728        if (ret)
1729                goto out;
1730
1731        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1732        priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
1733        if (IS_ERR(priv->base)) {
1734                ret = PTR_ERR(priv->base);
1735                goto out;
1736        }
1737
1738        dev->irq = priv->irq = res_irq->start;
1739        priv->irq_rx = res_irq_rx->start;
1740        priv->irq_tx = res_irq_tx->start;
1741
1742        priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1743        if (IS_ERR(priv->mac_clk)) {
1744                ret = PTR_ERR(priv->mac_clk);
1745                goto out;
1746        }
1747        ret = clk_prepare_enable(priv->mac_clk);
1748        if (ret)
1749                goto out;
1750
1751        /* initialize default and fetch platform data */
1752        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1753        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1754
1755        pd = dev_get_platdata(&pdev->dev);
1756        if (pd) {
1757                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1758                priv->has_phy = pd->has_phy;
1759                priv->phy_id = pd->phy_id;
1760                priv->has_phy_interrupt = pd->has_phy_interrupt;
1761                priv->phy_interrupt = pd->phy_interrupt;
1762                priv->use_external_mii = !pd->use_internal_phy;
1763                priv->pause_auto = pd->pause_auto;
1764                priv->pause_rx = pd->pause_rx;
1765                priv->pause_tx = pd->pause_tx;
1766                priv->force_duplex_full = pd->force_duplex_full;
1767                priv->force_speed_100 = pd->force_speed_100;
1768                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1769                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1770                priv->dma_chan_width = pd->dma_chan_width;
1771                priv->dma_has_sram = pd->dma_has_sram;
1772                priv->dma_desc_shift = pd->dma_desc_shift;
1773                priv->rx_chan = pd->rx_chan;
1774                priv->tx_chan = pd->tx_chan;
1775        }
1776
1777        if (priv->has_phy && !priv->use_external_mii) {
1778                /* using internal PHY, enable clock */
1779                priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1780                if (IS_ERR(priv->phy_clk)) {
1781                        ret = PTR_ERR(priv->phy_clk);
1782                        priv->phy_clk = NULL;
1783                        goto out_disable_clk_mac;
1784                }
1785                ret = clk_prepare_enable(priv->phy_clk);
1786                if (ret)
1787                        goto out_disable_clk_mac;
1788        }
1789
1790        /* do minimal hardware init to be able to probe mii bus */
1791        bcm_enet_hw_preinit(priv);
1792
1793        /* MII bus registration */
1794        if (priv->has_phy) {
1795
1796                priv->mii_bus = mdiobus_alloc();
1797                if (!priv->mii_bus) {
1798                        ret = -ENOMEM;
1799                        goto out_uninit_hw;
1800                }
1801
1802                bus = priv->mii_bus;
1803                bus->name = "bcm63xx_enet MII bus";
1804                bus->parent = &pdev->dev;
1805                bus->priv = priv;
1806                bus->read = bcm_enet_mdio_read_phylib;
1807                bus->write = bcm_enet_mdio_write_phylib;
1808                sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1809
1810                /* only probe the bus where we think the PHY is, because
1811                 * the mdio read operation returns 0 instead of 0xffff
1812                 * if no slave is present on the hw */
1813                bus->phy_mask = ~(1 << priv->phy_id);
1814
1815                if (priv->has_phy_interrupt)
1816                        bus->irq[priv->phy_id] = priv->phy_interrupt;
1817
1818                ret = mdiobus_register(bus);
1819                if (ret) {
1820                        dev_err(&pdev->dev, "unable to register mdio bus\n");
1821                        goto out_free_mdio;
1822                }
1823        } else {
1824
1825                /* run platform code to initialize PHY device */
1826                if (pd && pd->mii_config &&
1827                    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1828                                   bcm_enet_mdio_write_mii)) {
1829                        dev_err(&pdev->dev, "unable to configure mdio bus\n");
                            ret = -ENODEV;
1830                        goto out_uninit_hw;
1831                }
1832        }
1833
1834        spin_lock_init(&priv->rx_lock);
1835
1836        /* init rx timeout (used for oom) */
1837        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1838
1839        /* init the mib update lock&work */
1840        mutex_init(&priv->mib_update_lock);
1841        INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1842
1843        /* zero mib counters */
1844        for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1845                enet_writel(priv, 0, ENET_MIB_REG(i));
1846
1847        /* register netdevice */
1848        dev->netdev_ops = &bcm_enet_ops;
1849        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1850
1851        dev->ethtool_ops = &bcm_enet_ethtool_ops;
1852        /* MTU range: 46 - 2028 */
1853        dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1854        dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1855        SET_NETDEV_DEV(dev, &pdev->dev);
1856
1857        ret = register_netdev(dev);
1858        if (ret)
1859                goto out_unregister_mdio;
1860
1861        netif_carrier_off(dev);
1862        platform_set_drvdata(pdev, dev);
1863        priv->pdev = pdev;
1864        priv->net_dev = dev;
1865
1866        return 0;
1867
1868out_unregister_mdio:
1869        if (priv->mii_bus)
1870                mdiobus_unregister(priv->mii_bus);
1871
1872out_free_mdio:
1873        if (priv->mii_bus)
1874                mdiobus_free(priv->mii_bus);
1875
1876out_uninit_hw:
1877        /* turn off mdc clock */
1878        enet_writel(priv, 0, ENET_MIISC_REG);
1879        clk_disable_unprepare(priv->phy_clk);
1880
1881out_disable_clk_mac:
1882        clk_disable_unprepare(priv->mac_clk);
1883out:
1884        free_netdev(dev);
1885        return ret;
1886}
1887
1888
1889/*
1890 * exit func, stops hardware and unregisters netdevice
1891 */
1892static int bcm_enet_remove(struct platform_device *pdev)
1893{
1894        struct bcm_enet_priv *priv;
1895        struct net_device *dev;
1896
1897        /* stop netdevice */
1898        dev = platform_get_drvdata(pdev);
1899        priv = netdev_priv(dev);
1900        unregister_netdev(dev);
1901
1902        /* turn off mdc clock */
1903        enet_writel(priv, 0, ENET_MIISC_REG);
1904
1905        if (priv->has_phy) {
1906                mdiobus_unregister(priv->mii_bus);
1907                mdiobus_free(priv->mii_bus);
1908        } else {
1909                struct bcm63xx_enet_platform_data *pd;
1910
1911                pd = dev_get_platdata(&pdev->dev);
1912                if (pd && pd->mii_config)
1913                        pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1914                                       bcm_enet_mdio_write_mii);
1915        }
1916
1917        /* disable hw block clocks */
1918        clk_disable_unprepare(priv->phy_clk);
1919        clk_disable_unprepare(priv->mac_clk);
1920
1921        free_netdev(dev);
1922        return 0;
1923}
1924
1925struct platform_driver bcm63xx_enet_driver = {
1926        .probe  = bcm_enet_probe,
1927        .remove = bcm_enet_remove,
1928        .driver = {
1929                .name   = "bcm63xx_enet",
1930                .owner  = THIS_MODULE,
1931        },
1932};
1933
1934/*
1935 * switch mii access callbacks
1936 */
1937static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1938                                int ext, int phy_id, int location)
1939{
1940        u32 reg;
1941        int ret;
1942
1943        spin_lock_bh(&priv->enetsw_mdio_lock);
1944        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1945
1946        reg = ENETSW_MDIOC_RD_MASK |
1947                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1948                (location << ENETSW_MDIOC_REG_SHIFT);
1949
1950        if (ext)
1951                reg |= ENETSW_MDIOC_EXT_MASK;
1952
1953        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
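            /* no completion flag is polled here; the fixed 50us delay is
             * assumed to be long enough for the MDIO transaction to finish */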
1954        udelay(50);
1955        ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1956        spin_unlock_bh(&priv->enetsw_mdio_lock);
1957        return ret;
1958}
1959
1960static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1961                                 int ext, int phy_id, int location,
1962                                 uint16_t data)
1963{
1964        u32 reg;
1965
1966        spin_lock_bh(&priv->enetsw_mdio_lock);
1967        enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1968
1969        reg = ENETSW_MDIOC_WR_MASK |
1970                (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1971                (location << ENETSW_MDIOC_REG_SHIFT);
1972
1973        if (ext)
1974                reg |= ENETSW_MDIOC_EXT_MASK;
1975
1976        reg |= data;
1977
1978        enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1979        udelay(50);
1980        spin_unlock_bh(&priv->enetsw_mdio_lock);
1981}
1982
1983static inline int bcm_enet_port_is_rgmii(int portid)
1984{
1985        return portid >= ENETSW_RGMII_PORT0;
1986}
1987
1988/*
1989 * enet sw PHY polling
1990 */
1991static void swphy_poll_timer(struct timer_list *t)
1992{
1993        struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
1994        unsigned int i;
1995
1996        for (i = 0; i < priv->num_ports; i++) {
1997                struct bcm63xx_enetsw_port *port;
1998                int val, j, up, advertise, lpa, speed, duplex, media;
1999                int external_phy = bcm_enet_port_is_rgmii(i);
2000                u8 override;
2001
2002                port = &priv->used_ports[i];
2003                if (!port->used)
2004                        continue;
2005
2006                if (port->bypass_link)
2007                        continue;
2008
2009                /* read twice: the BMSR link bit is latched low, first read clears it */
2010                for (j = 0; j < 2; j++)
2011                        val = bcmenet_sw_mdio_read(priv, external_phy,
2012                                                   port->phy_id, MII_BMSR);
2013
2014                if (val == 0xffff)
2015                        continue;
2016
2017                up = (val & BMSR_LSTATUS) ? 1 : 0;
2018                if (!(up ^ priv->sw_port_link[i]))
2019                        continue;
2020
2021                priv->sw_port_link[i] = up;
2022
2023                /* link changed */
2024                if (!up) {
2025                        dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2026                                 port->name);
2027                        enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2028                                      ENETSW_PORTOV_REG(i));
2029                        enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2030                                      ENETSW_PTCTRL_TXDIS_MASK,
2031                                      ENETSW_PTCTRL_REG(i));
2032                        continue;
2033                }
2034
2035                advertise = bcmenet_sw_mdio_read(priv, external_phy,
2036                                                 port->phy_id, MII_ADVERTISE);
2037
2038                lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2039                                           MII_LPA);
2040
2041                /* figure out media and duplex from advertise and LPA values */
2042                media = mii_nway_result(lpa & advertise);
2043                duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2044
2045                if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2046                        speed = 100;
2047                else
2048                        speed = 10;
2049
2050                if (val & BMSR_ESTATEN) {
2051                        advertise = bcmenet_sw_mdio_read(priv, external_phy,
2052                                                port->phy_id, MII_CTRL1000);
2053
2054                        lpa = bcmenet_sw_mdio_read(priv, external_phy,
2055                                                port->phy_id, MII_STAT1000);
2056
2057                        if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
2058                                        && lpa & (LPA_1000FULL | LPA_1000HALF)) {
2059                                speed = 1000;
2060                                duplex = (lpa & LPA_1000FULL);
2061                        }
2062                }
2063
2064                dev_info(&priv->pdev->dev,
2065                         "link UP on %s, %dMbps, %s-duplex\n",
2066                         port->name, speed, duplex ? "full" : "half");
2067
2068                override = ENETSW_PORTOV_ENABLE_MASK |
2069                        ENETSW_PORTOV_LINKUP_MASK;
2070
2071                if (speed == 1000)
2072                        override |= ENETSW_IMPOV_1000_MASK;
2073                else if (speed == 100)
2074                        override |= ENETSW_IMPOV_100_MASK;
2075                if (duplex)
2076                        override |= ENETSW_IMPOV_FDX_MASK;
2077
2078                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2079                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2080        }
2081
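            /* re-arm the timer: poll switch PHY link state again in one second */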
2082        priv->swphy_poll.expires = jiffies + HZ;
2083        add_timer(&priv->swphy_poll);
2084}
2085
2086/*
2087 * open callback, allocate dma rings & buffers and start rx operation
2088 */
2089static int bcm_enetsw_open(struct net_device *dev)
2090{
2091        struct bcm_enet_priv *priv;
2092        struct device *kdev;
2093        int i, ret;
2094        unsigned int size;
2095        void *p;
2096        u32 val;
2097
2098        priv = netdev_priv(dev);
2099        kdev = &priv->pdev->dev;
2100
2101        /* mask all interrupts and request them */
2102        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2103        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2104
2105        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2106                          0, dev->name, dev);
2107        if (ret)
2108                goto out_freeirq;
2109
2110        if (priv->irq_tx != -1) {
2111                ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2112                                  0, dev->name, dev);
2113                if (ret)
2114                        goto out_freeirq_rx;
2115        }
2116
2117        /* allocate rx dma ring */
2118        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2119        p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2120        if (!p) {
2121                dev_err(kdev, "cannot allocate rx ring %u\n", size);
2122                ret = -ENOMEM;
2123                goto out_freeirq_tx;
2124        }
2125
2126        priv->rx_desc_alloc_size = size;
2127        priv->rx_desc_cpu = p;
2128
2129        /* allocate tx dma ring */
2130        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2131        p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2132        if (!p) {
2133                dev_err(kdev, "cannot allocate tx ring\n");
2134                ret = -ENOMEM;
2135                goto out_free_rx_ring;
2136        }
2137
2138        priv->tx_desc_alloc_size = size;
2139        priv->tx_desc_cpu = p;
2140
2141        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2142                               GFP_KERNEL);
2143        if (!priv->tx_skb) {
2144                dev_err(kdev, "cannot allocate tx skb queue\n");
2145                ret = -ENOMEM;
2146                goto out_free_tx_ring;
2147        }
2148
2149        priv->tx_desc_count = priv->tx_ring_size;
2150        priv->tx_dirty_desc = 0;
2151        priv->tx_curr_desc = 0;
2152        spin_lock_init(&priv->tx_lock);
2153
2154        /* init & fill rx ring with skbs */
2155        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
2156                               GFP_KERNEL);
2157        if (!priv->rx_skb) {
2158                dev_err(kdev, "cannot allocate rx skb queue\n");
2159                ret = -ENOMEM;
2160                goto out_free_tx_skb;
2161        }
2162
2163        priv->rx_desc_count = 0;
2164        priv->rx_dirty_desc = 0;
2165        priv->rx_curr_desc = 0;
2166
2167        /* disable all ports */
2168        for (i = 0; i < priv->num_ports; i++) {
2169                enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2170                              ENETSW_PORTOV_REG(i));
2171                enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2172                              ENETSW_PTCTRL_TXDIS_MASK,
2173                              ENETSW_PTCTRL_REG(i));
2174
2175                priv->sw_port_link[i] = 0;
2176        }
2177
2178        /* reset mib */
2179        val = enetsw_readb(priv, ENETSW_GMCR_REG);
2180        val |= ENETSW_GMCR_RST_MIB_MASK;
2181        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2182        mdelay(1);
2183        val &= ~ENETSW_GMCR_RST_MIB_MASK;
2184        enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2185        mdelay(1);
2186
2187        /* force CPU port state */
2188        val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2189        val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2190        enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2191
2192        /* enable switch forward engine */
2193        val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2194        val |= ENETSW_SWMODE_FWD_EN_MASK;
2195        enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2196
2197        /* enable jumbo on all ports */
2198        enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2199        enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2200
2201        /* initialize flow control buffer allocation */
2202        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2203                        ENETDMA_BUFALLOC_REG(priv->rx_chan));
2204
2205        if (bcm_enet_refill_rx(dev)) {
2206                dev_err(kdev, "cannot allocate rx skb queue\n");
2207                ret = -ENOMEM;
2208                goto out;
2209        }
2210
2211        /* write rx & tx ring addresses */
2212        enet_dmas_writel(priv, priv->rx_desc_dma,
2213                         ENETDMAS_RSTART_REG, priv->rx_chan);
2214        enet_dmas_writel(priv, priv->tx_desc_dma,
2215                         ENETDMAS_RSTART_REG, priv->tx_chan);
2216
2217        /* clear remaining state ram for rx & tx channel */
2218        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2219        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2220        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2221        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2222        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2223        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2224
2225        /* set dma maximum burst len */
2226        enet_dmac_writel(priv, priv->dma_maxburst,
2227                         ENETDMAC_MAXBURST, priv->rx_chan);
2228        enet_dmac_writel(priv, priv->dma_maxburst,
2229                         ENETDMAC_MAXBURST, priv->tx_chan);
2230
2231        /* set flow control low/high threshold to 1/3 / 2/3 */
2232        val = priv->rx_ring_size / 3;
2233        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2234        val = (priv->rx_ring_size * 2) / 3;
2235        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2236
2237        /* all set, enable dma engine and interrupts, and kick the
2238         * rx dma channel
2239         */
2240        wmb();
2241        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2242        enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2243                         ENETDMAC_CHANCFG, priv->rx_chan);
2244
2245        /* watch "packet transferred" interrupt in rx and tx */
2246        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2247                         ENETDMAC_IR, priv->rx_chan);
2248        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2249                         ENETDMAC_IR, priv->tx_chan);
2250
2251        /* make sure we enable napi before rx interrupt  */
2252        napi_enable(&priv->napi);
2253
2254        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2255                         ENETDMAC_IRMASK, priv->rx_chan);
2256        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2257                         ENETDMAC_IRMASK, priv->tx_chan);
2258
2259        netif_carrier_on(dev);
2260        netif_start_queue(dev);
2261
2262        /* apply override config for bypass_link ports here. */
2263        for (i = 0; i < priv->num_ports; i++) {
2264                struct bcm63xx_enetsw_port *port;
2265                u8 override;
2266                port = &priv->used_ports[i];
2267                if (!port->used)
2268                        continue;
2269
2270                if (!port->bypass_link)
2271                        continue;
2272
2273                override = ENETSW_PORTOV_ENABLE_MASK |
2274                        ENETSW_PORTOV_LINKUP_MASK;
2275
2276                switch (port->force_speed) {
2277                case 1000:
2278                        override |= ENETSW_IMPOV_1000_MASK;
2279                        break;
2280                case 100:
2281                        override |= ENETSW_IMPOV_100_MASK;
2282                        break;
2283                case 10:
2284                        break;
2285                default:
2286                        pr_warn("invalid forced speed on port %s: assume 10\n",
2287                               port->name);
2288                        break;
2289                }
2290
2291                if (port->force_duplex_full)
2292                        override |= ENETSW_IMPOV_FDX_MASK;
2293
2294
2295                enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2296                enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2297        }
2298
2299        /* start phy polling timer */
2300        timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2301        mod_timer(&priv->swphy_poll, jiffies);
2302        return 0;
2303
2304out:
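            /* rx refill failed: unmap and free any rx skbs already attached
             * to the ring before unwinding the other allocations */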
2305        for (i = 0; i < priv->rx_ring_size; i++) {
2306                struct bcm_enet_desc *desc;
2307
2308                if (!priv->rx_skb[i])
2309                        continue;
2310
2311                desc = &priv->rx_desc_cpu[i];
2312                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2313                                 DMA_FROM_DEVICE);
2314                kfree_skb(priv->rx_skb[i]);
2315        }
2316        kfree(priv->rx_skb);
2317
2318out_free_tx_skb:
2319        kfree(priv->tx_skb);
2320
2321out_free_tx_ring:
2322        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2323                          priv->tx_desc_cpu, priv->tx_desc_dma);
2324
2325out_free_rx_ring:
2326        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2327                          priv->rx_desc_cpu, priv->rx_desc_dma);
2328
2329out_freeirq_tx:
2330        if (priv->irq_tx != -1)
2331                free_irq(priv->irq_tx, dev);
2332
2333out_freeirq_rx:
2334        free_irq(priv->irq_rx, dev);
2335
2336out_freeirq:
2337        return ret;
2338}
2339
2340/* stop callback */
2341static int bcm_enetsw_stop(struct net_device *dev)
2342{
2343        struct bcm_enet_priv *priv;
2344        struct device *kdev;
2345        int i;
2346
2347        priv = netdev_priv(dev);
2348        kdev = &priv->pdev->dev;
2349
2350        del_timer_sync(&priv->swphy_poll);
2351        netif_stop_queue(dev);
2352        napi_disable(&priv->napi);
2353        del_timer_sync(&priv->rx_timeout);
2354
2355        /* mask all interrupts */
2356        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2357        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2358
2359        /* disable dma on both channels */
2360        bcm_enet_disable_dma(priv, priv->tx_chan);
2361        bcm_enet_disable_dma(priv, priv->rx_chan);
2362
2363        /* force reclaim of all tx buffers */
2364        bcm_enet_tx_reclaim(dev, 1);
2365
2366        /* free the rx skb ring */
2367        for (i = 0; i < priv->rx_ring_size; i++) {
2368                struct bcm_enet_desc *desc;
2369
2370                if (!priv->rx_skb[i])
2371                        continue;
2372
2373                desc = &priv->rx_desc_cpu[i];
2374                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2375                                 DMA_FROM_DEVICE);
2376                kfree_skb(priv->rx_skb[i]);
2377        }
2378
2379        /* free remaining allocated memory */
2380        kfree(priv->rx_skb);
2381        kfree(priv->tx_skb);
2382        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2383                          priv->rx_desc_cpu, priv->rx_desc_dma);
2384        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2385                          priv->tx_desc_cpu, priv->tx_desc_dma);
2386        if (priv->irq_tx != -1)
2387                free_irq(priv->irq_tx, dev);
2388        free_irq(priv->irq_rx, dev);
2389
2390        return 0;
2391}
2392
2393/* try to sort out the phy external status by walking the used_ports
2394 * field in the bcm_enet_priv structure. in case the phy address is not
2395 * assigned to any physical port on the switch, assume it is external
2396 * (and yell at the user).
2397 */
2398static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2399{
2400        int i;
2401
2402        for (i = 0; i < priv->num_ports; ++i) {
2403                if (!priv->used_ports[i].used)
2404                        continue;
2405                if (priv->used_ports[i].phy_id == phy_id)
2406                        return bcm_enet_port_is_rgmii(i);
2407        }
2408
2409        printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2410                    phy_id);
2411        return 1;
2412}
2413
2414/* can't use bcmenet_sw_mdio_read directly as we need to sort out
2415 * external/internal status of the given phy_id first.
2416 */
2417static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2418                                    int location)
2419{
2420        struct bcm_enet_priv *priv;
2421
2422        priv = netdev_priv(dev);
2423        return bcmenet_sw_mdio_read(priv,
2424                                    bcm_enetsw_phy_is_external(priv, phy_id),
2425                                    phy_id, location);
2426}
2427
2428/* can't use bcmenet_sw_mdio_write directly as we need to sort out
2429 * external/internal status of the given phy_id first.
2430 */
2431static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2432                                      int location,
2433                                      int val)
2434{
2435        struct bcm_enet_priv *priv;
2436
2437        priv = netdev_priv(dev);
2438        bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2439                              phy_id, location, val);
2440}
2441
2442static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2443{
2444        struct mii_if_info mii;
2445
2446        mii.dev = dev;
2447        mii.mdio_read = bcm_enetsw_mii_mdio_read;
2448        mii.mdio_write = bcm_enetsw_mii_mdio_write;
2449        mii.phy_id = 0;
2450        mii.phy_id_mask = 0x3f;
2451        mii.reg_num_mask = 0x1f;
2452        return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2453
2454}
2455
2456static const struct net_device_ops bcm_enetsw_ops = {
2457        .ndo_open               = bcm_enetsw_open,
2458        .ndo_stop               = bcm_enetsw_stop,
2459        .ndo_start_xmit         = bcm_enet_start_xmit,
2460        .ndo_change_mtu         = bcm_enet_change_mtu,
2461        .ndo_do_ioctl           = bcm_enetsw_ioctl,
2462};
2463
2464
2465static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2466        { "rx_packets", DEV_STAT(rx_packets), -1 },
2467        { "tx_packets", DEV_STAT(tx_packets), -1 },
2468        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2469        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2470        { "rx_errors", DEV_STAT(rx_errors), -1 },
2471        { "tx_errors", DEV_STAT(tx_errors), -1 },
2472        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2473        { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2474
2475        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2476        { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2477        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2478        { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2479        { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2480        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2481        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2482        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2483        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2484        { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2485          ETHSW_MIB_RX_1024_1522 },
2486        { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2487          ETHSW_MIB_RX_1523_2047 },
2488        { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2489          ETHSW_MIB_RX_2048_4095 },
2490        { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2491          ETHSW_MIB_RX_4096_8191 },
2492        { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2493          ETHSW_MIB_RX_8192_9728 },
2494        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2495        { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2496        { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2497        { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2498        { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2499
2500        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2501        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2502        { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2503        { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2504        { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2505        { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2506
2507};
2508
2509#define BCM_ENETSW_STATS_LEN    \
2510        (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
2511
2512static void bcm_enetsw_get_strings(struct net_device *netdev,
2513                                   u32 stringset, u8 *data)
2514{
2515        int i;
2516
2517        switch (stringset) {
2518        case ETH_SS_STATS:
2519                for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2520                        memcpy(data + i * ETH_GSTRING_LEN,
2521                               bcm_enetsw_gstrings_stats[i].stat_string,
2522                               ETH_GSTRING_LEN);
2523                }
2524                break;
2525        }
2526}
2527
2528static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2529                                     int string_set)
2530{
2531        switch (string_set) {
2532        case ETH_SS_STATS:
2533                return BCM_ENETSW_STATS_LEN;
2534        default:
2535                return -EINVAL;
2536        }
2537}
2538
2539static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2540                                   struct ethtool_drvinfo *drvinfo)
2541{
2542        strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2543        strncpy(drvinfo->fw_version, "N/A", 32);
2544        strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2545}
2546
2547static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2548                                         struct ethtool_stats *stats,
2549                                         u64 *data)
2550{
2551        struct bcm_enet_priv *priv;
2552        int i;
2553
2554        priv = netdev_priv(netdev);
2555
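            /* first pass: latch hardware MIB counters into the private mib
             * copy; 64-bit counters span two consecutive 32-bit registers */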
2556        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2557                const struct bcm_enet_stats *s;
2558                u32 lo, hi;
2559                char *p;
2560                int reg;
2561
2562                s = &bcm_enetsw_gstrings_stats[i];
2563
2564                reg = s->mib_reg;
2565                if (reg == -1)
2566                        continue;
2567
2568                lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2569                p = (char *)priv + s->stat_offset;
2570
2571                if (s->sizeof_stat == sizeof(u64)) {
2572                        hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2573                        *(u64 *)p = ((u64)hi << 32 | lo);
2574                } else {
2575                        *(u32 *)p = lo;
2576                }
2577        }
2578
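            /* second pass: report either netdev software stats or the cached
             * mib values, depending on whether the entry maps to a MIB register */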
2579        for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2580                const struct bcm_enet_stats *s;
2581                char *p;
2582
2583                s = &bcm_enetsw_gstrings_stats[i];
2584
2585                if (s->mib_reg == -1)
2586                        p = (char *)&netdev->stats + s->stat_offset;
2587                else
2588                        p = (char *)priv + s->stat_offset;
2589
2590                data[i] = (s->sizeof_stat == sizeof(u64)) ?
2591                        *(u64 *)p : *(u32 *)p;
2592        }
2593}
2594
2595static void bcm_enetsw_get_ringparam(struct net_device *dev,
2596                                     struct ethtool_ringparam *ering)
2597{
2598        struct bcm_enet_priv *priv;
2599
2600        priv = netdev_priv(dev);
2601
2602        /* rx/tx ring is actually only limited by memory */
2603        ering->rx_max_pending = 8192;
2604        ering->tx_max_pending = 8192;
2605        ering->rx_mini_max_pending = 0;
2606        ering->rx_jumbo_max_pending = 0;
2607        ering->rx_pending = priv->rx_ring_size;
2608        ering->tx_pending = priv->tx_ring_size;
2609}
2610
2611static int bcm_enetsw_set_ringparam(struct net_device *dev,
2612                                    struct ethtool_ringparam *ering)
2613{
2614        struct bcm_enet_priv *priv;
2615        int was_running;
2616
2617        priv = netdev_priv(dev);
2618
2619        was_running = 0;
2620        if (netif_running(dev)) {
2621                bcm_enetsw_stop(dev);
2622                was_running = 1;
2623        }
2624
2625        priv->rx_ring_size = ering->rx_pending;
2626        priv->tx_ring_size = ering->tx_pending;
2627
2628        if (was_running) {
2629                int err;
2630
2631                err = bcm_enetsw_open(dev);
2632                if (err)
2633                        dev_close(dev);
2634        }
2635        return 0;
2636}
2637
2638static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2639        .get_strings            = bcm_enetsw_get_strings,
2640        .get_sset_count         = bcm_enetsw_get_sset_count,
2641        .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2642        .get_drvinfo            = bcm_enetsw_get_drvinfo,
2643        .get_ringparam          = bcm_enetsw_get_ringparam,
2644        .set_ringparam          = bcm_enetsw_set_ringparam,
2645};
2646
2647/* allocate netdevice, request register memory and register device. */
2648static int bcm_enetsw_probe(struct platform_device *pdev)
2649{
2650        struct bcm_enet_priv *priv;
2651        struct net_device *dev;
2652        struct bcm63xx_enetsw_platform_data *pd;
2653        struct resource *res_mem;
2654        int ret, irq_rx, irq_tx;
2655
2656        if (!bcm_enet_shared_base[0])
2657                return -EPROBE_DEFER;
2658
2659        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2660        irq_rx = platform_get_irq(pdev, 0);
2661        irq_tx = platform_get_irq(pdev, 1);
2662        if (!res_mem || irq_rx < 0)
2663                return -ENODEV;
2664
2665        ret = 0;
2666        dev = alloc_etherdev(sizeof(*priv));
2667        if (!dev)
2668                return -ENOMEM;
2669        priv = netdev_priv(dev);
2670        memset(priv, 0, sizeof(*priv));
2671
2672        /* initialize default and fetch platform data */
2673        priv->enet_is_sw = true;
2674        priv->irq_rx = irq_rx;
2675        priv->irq_tx = irq_tx;
2676        priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2677        priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2678        priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2679
2680        pd = dev_get_platdata(&pdev->dev);
2681        if (pd) {
2682                memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2683                memcpy(priv->used_ports, pd->used_ports,
2684                       sizeof(pd->used_ports));
2685                priv->num_ports = pd->num_ports;
2686                priv->dma_has_sram = pd->dma_has_sram;
2687                priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2688                priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2689                priv->dma_chan_width = pd->dma_chan_width;
2690        }
2691
2692        ret = bcm_enet_change_mtu(dev, dev->mtu);
2693        if (ret)
2694                goto out;
2695
2696        priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2697        if (IS_ERR(priv->base)) {
2698                ret = PTR_ERR(priv->base);
2699                goto out;
2700        }
2701
2702        priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2703        if (IS_ERR(priv->mac_clk)) {
2704                ret = PTR_ERR(priv->mac_clk);
2705                goto out;
2706        }
2707        ret = clk_prepare_enable(priv->mac_clk);
2708        if (ret)
2709                goto out;
2710
2711        priv->rx_chan = 0;
2712        priv->tx_chan = 1;
2713        spin_lock_init(&priv->rx_lock);
2714
2715        /* init rx timeout (used for oom) */
2716        timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2717
2718        /* register netdevice */
2719        dev->netdev_ops = &bcm_enetsw_ops;
2720        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2721        dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2722        SET_NETDEV_DEV(dev, &pdev->dev);
2723
2724        spin_lock_init(&priv->enetsw_mdio_lock);
2725
2726        ret = register_netdev(dev);
2727        if (ret)
2728                goto out_disable_clk;
2729
2730        netif_carrier_off(dev);
2731        platform_set_drvdata(pdev, dev);
2732        priv->pdev = pdev;
2733        priv->net_dev = dev;
2734
2735        return 0;
2736
2737out_disable_clk:
2738        clk_disable_unprepare(priv->mac_clk);
2739out:
2740        free_netdev(dev);
2741        return ret;
2742}
2743
2744
2745/* exit func, stops hardware and unregisters netdevice */
2746static int bcm_enetsw_remove(struct platform_device *pdev)
2747{
2748        struct bcm_enet_priv *priv;
2749        struct net_device *dev;
2750
2751        /* stop netdevice */
2752        dev = platform_get_drvdata(pdev);
2753        priv = netdev_priv(dev);
2754        unregister_netdev(dev);
2755
2756        clk_disable_unprepare(priv->mac_clk);
2757
2758        free_netdev(dev);
2759        return 0;
2760}
2761
2762struct platform_driver bcm63xx_enetsw_driver = {
2763        .probe  = bcm_enetsw_probe,
2764        .remove = bcm_enetsw_remove,
2765        .driver = {
2766                .name   = "bcm63xx_enetsw",
2767                .owner  = THIS_MODULE,
2768        },
2769};
2770
2771/* reserve & remap memory space shared between all macs */
2772static int bcm_enet_shared_probe(struct platform_device *pdev)
2773{
2774        struct resource *res;
2775        void __iomem *p[3];
2776        unsigned int i;
2777
2778        memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2779
2780        for (i = 0; i < 3; i++) {
2781                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
2782                p[i] = devm_ioremap_resource(&pdev->dev, res);
2783                if (IS_ERR(p[i]))
2784                        return PTR_ERR(p[i]);
2785        }
2786
2787        memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2788
2789        return 0;
2790}
2791
2792static int bcm_enet_shared_remove(struct platform_device *pdev)
2793{
2794        return 0;
2795}
2796
2797/* this "shared" driver is needed because both macs share a single
2798 * address space
2799 */
2800struct platform_driver bcm63xx_enet_shared_driver = {
2801        .probe  = bcm_enet_shared_probe,
2802        .remove = bcm_enet_shared_remove,
2803        .driver = {
2804                .name   = "bcm63xx_enet_shared",
2805                .owner  = THIS_MODULE,
2806        },
2807};
2808
2809static struct platform_driver * const drivers[] = {
2810        &bcm63xx_enet_shared_driver,
2811        &bcm63xx_enet_driver,
2812        &bcm63xx_enetsw_driver,
2813};
2814
2815/* entry point */
2816static int __init bcm_enet_init(void)
2817{
2818        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2819}
2820
2821static void __exit bcm_enet_exit(void)
2822{
2823        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2824}
2825
2826
2827module_init(bcm_enet_init);
2828module_exit(bcm_enet_exit);
2829
2830MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2831MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2832MODULE_LICENSE("GPL");
2833