linux/drivers/net/ethernet/freescale/fec_main.c
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);

#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT   0xf
#else
#define FEC_ALIGNMENT   0x3
#endif

#define DRIVER_NAME     "fec"
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE    (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V  0xFFF0

/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC              (1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME            (1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET            (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT              (1 << 3)
/* Controller has extended buffer descriptors */
#define FEC_QUIRK_HAS_BUFDESC_EX        (1 << 4)
/* Controller has hardware checksum support */
#define FEC_QUIRK_HAS_CSUM              (1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN              (1 << 6)
/* ENET IP errata ERR006358
 *
 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
 * detected as not set during a prior frame transmission, then the
 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
 * frames not being transmitted until there is a 0-to-1 transition on
 * ENET_TDAR[TDAR].
 */
#define FEC_QUIRK_ERR006358            (1 << 7)
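/*
 * Workaround used below: when fec_enet_start_xmit() sees that the previous
 * descriptor was not ready, it schedules delayed work that writes
 * FEC_X_DES_ACTIVE again (see fec_enet_work()), forcing the 0-to-1
 * transition on ENET_TDAR[TDAR] that restarts transmission.
 */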

static struct platform_device_id fec_devtype[] = {
        {
                /* keep it for coldfire */
                .name = DRIVER_NAME,
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
                .driver_data = FEC_QUIRK_USE_GASKET,
        }, {
                .name = "imx27-fec",
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                                FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
        }, {
                .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC,
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
        IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
        MVF600_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
        { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC    0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC    0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC    0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC    (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC    0xffc0406b
#else
#define FEC_FLASHMAC    0
#endif
#endif /* CONFIG_M5272 */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR  ((uint)0x80000000)      /* Heartbeat error */
#define FEC_ENET_BABR   ((uint)0x40000000)      /* Babbling receiver */
#define FEC_ENET_BABT   ((uint)0x20000000)      /* Babbling transmitter */
#define FEC_ENET_GRA    ((uint)0x10000000)      /* Graceful stop complete */
#define FEC_ENET_TXF    ((uint)0x08000000)      /* Full frame transmitted */
#define FEC_ENET_TXB    ((uint)0x04000000)      /* A buffer was transmitted */
#define FEC_ENET_RXF    ((uint)0x02000000)      /* Full frame received */
#define FEC_ENET_RXB    ((uint)0x01000000)      /* A buffer was received */
#define FEC_ENET_MII    ((uint)0x00800000)      /* MII interrupt */
#define FEC_ENET_EBERR  ((uint)0x00400000)      /* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
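/*
 * RXF is masked off while a NAPI poll is in flight (see
 * fec_enet_interrupt()) and restored from fec_enet_rx_napi() once the
 * poll completes under budget.
 */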

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE         1522
#define PKT_MINBUF_SIZE         64
#define PKT_MAXBLR_SIZE         1536
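/*
 * 1522 covers a maximum-size VLAN-tagged frame (1518 + 4 bytes of tag).
 * The buffer length programmed into FEC_R_BUFF_SIZE is rounded up to
 * 1536 to satisfy the hardware's buffer size alignment requirement.
 */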

/* FEC receive acceleration */
#define FEC_RACC_IPDIS          (1 << 1)
#define FEC_RACC_PRODIS         (1 << 2)
#define FEC_RACC_OPTIONS        (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
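/*
 * With RX checksum offload enabled, fec_restart() sets these bits so the
 * MAC discards frames carrying IP header or protocol checksum errors;
 * frames that survive can then be marked CHECKSUM_UNNECESSARY in
 * fec_enet_rx().
 */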

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE  0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST             (1 << 30)
#define FEC_MMFR_OP_READ        (2 << 28)
#define FEC_MMFR_OP_WRITE       (1 << 28)
#define FEC_MMFR_PA(v)          ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)          ((v & 0x1f) << 18)
#define FEC_MMFR_TA             (2 << 16)
#define FEC_MMFR_DATA(v)        (v & 0xffff)

#define FEC_MII_TIMEOUT         30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG  0x1
#define FEC_PAUSE_FLAG_ENABLE   0x2

static int mii_cnt;

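/*
 * The TX and RX rings are contiguous arrays of descriptors.  These
 * helpers step a descriptor pointer forward or backward with wraparound,
 * using the larger struct bufdesc_ex stride when extended descriptors
 * are in use.
 */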
static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
        struct bufdesc *new_bd = bdp + 1;
        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
        struct bufdesc_ex *ex_base;
        struct bufdesc *base;
        int ring_size;

        if (bdp >= fep->tx_bd_base) {
                base = fep->tx_bd_base;
                ring_size = fep->tx_ring_size;
                ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
        } else {
                base = fep->rx_bd_base;
                ring_size = fep->rx_ring_size;
                ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
        }

        if (fep->bufdesc_ex)
                return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
                        ex_base : ex_new_bd);
        else
                return (new_bd >= (base + ring_size)) ?
                        base : new_bd;
}

static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
        struct bufdesc *new_bd = bdp - 1;
        struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
        struct bufdesc_ex *ex_base;
        struct bufdesc *base;
        int ring_size;

        if (bdp >= fep->tx_bd_base) {
                base = fep->tx_bd_base;
                ring_size = fep->tx_ring_size;
                ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
        } else {
                base = fep->rx_bd_base;
                ring_size = fep->rx_ring_size;
                ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
        }

        if (fep->bufdesc_ex)
                return (struct bufdesc *)((ex_new_bd < ex_base) ?
                        (ex_new_bd + ring_size) : ex_new_bd);
        else
                return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}

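/*
 * Byte-swap each 32-bit word of a frame in place, for controllers with
 * FEC_QUIRK_SWAP_FRAME (i.MX28), which present frame data to the driver
 * in the opposite byte order.
 */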
static void *swap_buffer(void *bufaddr, int len)
{
        int i;
        unsigned int *buf = bufaddr;

        for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
                *buf = cpu_to_be32(*buf);

        return bufaddr;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
        /* Only run for packets requiring a checksum. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (unlikely(skb_cow_head(skb, 0)))
                return -1;

        *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

        return 0;
}

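/*
 * Queue an skb for transmission: claim the next TX descriptor, bounce
 * the data into an aligned buffer if needed, DMA-map it, and hand the
 * descriptor to the hardware by setting BD_ENET_TX_READY and kicking
 * FEC_X_DES_ACTIVE.
 */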
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp, *bdp_pre;
        void *bufaddr;
        unsigned short  status;
        unsigned int index;

        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;

        status = bdp->cbd_sc;

        if (status & BD_ENET_TX_READY) {
                /* Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since ndev->tbusy should be set.
                 */
                netdev_err(ndev, "tx queue full!\n");
                return NETDEV_TX_BUSY;
        }

        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Clear all of the status flags */
        status &= ~BD_ENET_TX_STATS;

        /* Set buffer length and buffer pointer */
        bufaddr = skb->data;
        bdp->cbd_datlen = skb->len;

        /*
         * On some FEC implementations data must be aligned on
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
        if (fep->bufdesc_ex)
                index = (struct bufdesc_ex *)bdp -
                        (struct bufdesc_ex *)fep->tx_bd_base;
        else
                index = bdp - fep->tx_bd_base;

        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }

        /*
         * Some designs made an incorrect assumption about the endianness
         * of the system they run on. As a result, the driver has to swap
         * every frame going to and coming from the controller.
         */
        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                swap_buffer(bufaddr, skb->len);

        /* Save skb pointer */
        fep->tx_skbuff[index] = skb;

        /* Push the data cache so the CPM does not get stale memory
         * data.
         */
        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
                        skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
                bdp->cbd_bufaddr = 0;
                fep->tx_skbuff[index] = NULL;
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_OK;
        }

        if (fep->bufdesc_ex) {
                struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
                ebdp->cbd_bdu = 0;
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
                        fep->hwts_tx_en)) {
                        ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                } else {
                        ebdp->cbd_esc = BD_ENET_TX_INT;

                        /* Enable protocol checksum flags
                         * We do not bother with the IP Checksum bits as they
                         * are done by the kernel
                         */
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                ebdp->cbd_esc |= BD_ENET_TX_PINS;
                }
        }

        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
                        | BD_ENET_TX_LAST | BD_ENET_TX_TC);
        bdp->cbd_sc = status;

        bdp_pre = fec_enet_get_prevdesc(bdp, fep);
        if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
            !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
                fep->delay_work.trig_tx = true;
                schedule_delayed_work(&(fep->delay_work.delay_work),
                                        msecs_to_jiffies(1));
        }

        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(bdp, fep);

        skb_tx_timestamp(skb);

        fep->cur_tx = bdp;

        if (fep->cur_tx == fep->dirty_tx)
                netif_stop_queue(ndev);

        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);

        return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct bufdesc *bdp;
        unsigned int i;

        /* Initialize the receive buffer descriptors. */
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {

                /* Initialize the BD for every fragment in the page. */
                if (bdp->cbd_bufaddr)
                        bdp->cbd_sc = BD_ENET_RX_EMPTY;
                else
                        bdp->cbd_sc = 0;
                bdp = fec_enet_get_nextdesc(bdp, fep);
        }

        /* Set the last buffer to wrap */
        bdp = fec_enet_get_prevdesc(bdp, fep);
        bdp->cbd_sc |= BD_SC_WRAP;

        fep->cur_rx = fep->rx_bd_base;

        /* ...and the same for transmit */
        bdp = fep->tx_bd_base;
        fep->cur_tx = bdp;
        for (i = 0; i < fep->tx_ring_size; i++) {

                /* Initialize the BD for every fragment in the page. */
                bdp->cbd_sc = 0;
                if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
                        fep->tx_skbuff[i] = NULL;
                }
                bdp->cbd_bufaddr = 0;
                bdp = fec_enet_get_nextdesc(bdp, fep);
        }

        /* Set the last buffer to wrap */
        bdp = fec_enet_get_prevdesc(bdp, fep);
        bdp->cbd_sc |= BD_SC_WRAP;
        fep->dirty_tx = bdp;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *ndev, int duplex)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        int i;
        u32 val;
        u32 temp_mac[2];
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */

        if (netif_running(ndev)) {
                netif_device_detach(ndev);
                napi_disable(&fep->napi);
                netif_stop_queue(ndev);
                netif_tx_lock_bh(ndev);
        }

        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);

        /*
         * An enet-mac reset also resets the MAC address registers,
         * so we need to reconfigure them.
         */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
                writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
                writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
        }

        /* Clear any outstanding interrupt. */
        writel(0xffc00000, fep->hwp + FEC_IEVENT);

        /* Set maximum receive buffer size. */
        writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

        fec_enet_bd_init(ndev);

        /* Set receive and transmit descriptor base. */
        writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
        if (fep->bufdesc_ex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
                        * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
        else
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);

        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
                        fep->tx_skbuff[i] = NULL;
                }
        }

        /* Enable MII mode */
        if (duplex) {
                /* FD enable */
                writel(0x04, fep->hwp + FEC_X_CNTRL);
        } else {
                /* No Rcv on Xmit */
                rcntl |= 0x02;
                writel(0x0, fep->hwp + FEC_X_CNTRL);
        }

        fep->full_duplex = duplex;

        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
        /* set RX checksum */
        val = readl(fep->hwp + FEC_RACC);
        if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
                val |= FEC_RACC_OPTIONS;
        else
                val &= ~FEC_RACC_OPTIONS;
        writel(val, fep->hwp + FEC_RACC);
#endif

        /*
         * The phy interface and speed need to be configured
         * differently on enet-mac.
         */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                /* Enable flow control and length check */
                rcntl |= 0x40000000 | 0x00000020;

                /* RGMII, RMII or MII */
                if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
                        rcntl |= (1 << 6);
                else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                        rcntl |= (1 << 8);
                else
                        rcntl &= ~(1 << 8);

                /* 1G, 100M or 10M */
                if (fep->phy_dev) {
                        if (fep->phy_dev->speed == SPEED_1000)
                                ecntl |= (1 << 5);
                        else if (fep->phy_dev->speed == SPEED_100)
                                rcntl &= ~(1 << 9);
                        else
                                rcntl |= (1 << 9);
                }
        } else {
#ifdef FEC_MIIGSK_ENR
                if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
                        u32 cfgr;
                        /* disable the gasket and wait */
                        writel(0, fep->hwp + FEC_MIIGSK_ENR);
                        while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
                                udelay(1);

                        /*
                         * configure the gasket:
                         *   RMII, 50 MHz, no loopback, no echo
                         *   MII, 25 MHz, no loopback, no echo
                         */
                        cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                                ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
                        if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
                                cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
                        writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

                        /* re-enable the gasket */
                        writel(2, fep->hwp + FEC_MIIGSK_ENR);
                }
#endif
        }

#if !defined(CONFIG_M5272)
        /* enable pause frame */
        if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
            ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
             fep->phy_dev && fep->phy_dev->pause)) {
                rcntl |= FEC_ENET_FCE;

                /* set FIFO threshold parameter to reduce overrun */
                writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
                writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
                writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
                writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

                /* OPD */
                writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
        } else {
                rcntl &= ~FEC_ENET_FCE;
        }
#endif /* !defined(CONFIG_M5272) */

        writel(rcntl, fep->hwp + FEC_R_CNTRL);

        /* Setup multicast filter. */
        set_multicast_list(ndev);
#ifndef CONFIG_M5272
        writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
        writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                /* enable ENET endian swap */
                ecntl |= (1 << 8);
                /* enable ENET store and forward mode */
                writel(1 << 8, fep->hwp + FEC_X_WMRK);
        }

        if (fep->bufdesc_ex)
                ecntl |= (1 << 4);

#ifndef CONFIG_M5272
        /* Enable the MIB statistic event counters */
        writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

        /* And last, enable the transmit and receive processing */
        writel(ecntl, fep->hwp + FEC_ECNTRL);
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);

        if (fep->bufdesc_ex)
                fec_ptp_start_cyclecounter(ndev);

        /* Enable interrupts we wish to service */
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

        if (netif_running(ndev)) {
                netif_tx_unlock_bh(ndev);
                netif_wake_queue(ndev);
                napi_enable(&fep->napi);
                netif_device_attach(ndev);
        }
}

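/*
 * Stop the controller: request a graceful transmit stop while the link
 * is up, then reset the MAC, keeping the MII clock and RMII mode
 * programmed so the MDIO bus stays usable.
 */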
static void
fec_stop(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

        /* We cannot expect a graceful transmit stop without link! */
        if (fep->link) {
                writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
                udelay(10);
                if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
                        netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }

        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

        /* We have to keep ENET enabled to have MII interrupt stay working */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                writel(2, fep->hwp + FEC_ECNTRL);
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
}

static void
fec_timeout(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        ndev->stats.tx_errors++;

        fep->delay_work.timeout = true;
        schedule_delayed_work(&(fep->delay_work.delay_work), 0);
}

static void fec_enet_work(struct work_struct *work)
{
        struct fec_enet_private *fep =
                container_of(work,
                             struct fec_enet_private,
                             delay_work.delay_work.work);

        if (fep->delay_work.timeout) {
                fep->delay_work.timeout = false;
                fec_restart(fep->netdev, fep->full_duplex);
                netif_wake_queue(fep->netdev);
        }

        if (fep->delay_work.trig_tx) {
                fep->delay_work.trig_tx = false;
                writel(0, fep->hwp + FEC_X_DES_ACTIVE);
        }
}

static void
fec_enet_tx(struct net_device *ndev)
{
        struct  fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
        int     index = 0;

        fep = netdev_priv(ndev);
        bdp = fep->dirty_tx;

        /* get next bdp of dirty_tx */
        bdp = fec_enet_get_nextdesc(bdp, fep);

        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

                /* current queue is empty */
                if (bdp == fep->cur_tx)
                        break;

                if (fep->bufdesc_ex)
                        index = (struct bufdesc_ex *)bdp -
                                (struct bufdesc_ex *)fep->tx_bd_base;
                else
                        index = bdp - fep->tx_bd_base;

                skb = fep->tx_skbuff[index];
                dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
                                DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;

                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
                                   BD_ENET_TX_CSL)) {
                        ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
                                ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
                                ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
                                ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
                                ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += bdp->cbd_datlen;
                }

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                        fep->bufdesc_ex) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        unsigned long flags;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        spin_lock_irqsave(&fep->tmreg_lock, flags);
                        shhwtstamps.hwtstamp = ns_to_ktime(
                                timecounter_cyc2time(&fep->tc, ebdp->ts));
                        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }

                if (status & BD_ENET_TX_READY)
                        netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");

                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;

                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
                fep->tx_skbuff[index] = NULL;

                fep->dirty_tx = bdp;

                /* Update pointer to next buffer descriptor to be transmitted */
                bdp = fec_enet_get_nextdesc(bdp, fep);

                /* Since we have freed up a buffer, the ring is no longer full
                 */
                if (fep->dirty_tx != fep->cur_tx) {
                        if (netif_queue_stopped(ndev))
                                netif_wake_queue(ndev);
                }
        }
        return;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
        ushort  pkt_len;
        __u8 *data;
        int     pkt_received = 0;
        struct  bufdesc_ex *ebdp = NULL;
        bool    vlan_packet_rcvd = false;
        u16     vlan_tag;
        int     index = 0;

#ifdef CONFIG_M532x
        flush_cache_all();
#endif

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

                if (pkt_received >= budget)
                        break;
                pkt_received++;

                /* Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((status & BD_ENET_RX_LAST) == 0)
                        netdev_err(ndev, "rcv is not +last\n");

                if (!fep->opened)
                        goto rx_processing_done;

                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                           BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        ndev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
                                ndev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO)     /* Frame alignment */
                                ndev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR)     /* CRC Error */
                                ndev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV)     /* FIFO overrun */
                                ndev->stats.rx_fifo_errors++;
                }

                /* Report late collisions as a frame error.
                 * On this error, the BD is closed, but we don't know what we
                 * have in the buffer.  So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
                        ndev->stats.rx_errors++;
                        ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
                ndev->stats.rx_bytes += pkt_len;

                if (fep->bufdesc_ex)
                        index = (struct bufdesc_ex *)bdp -
                                (struct bufdesc_ex *)fep->rx_bd_base;
                else
                        index = bdp - fep->rx_bd_base;
                data = fep->rx_skbuff[index]->data;
                dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

                if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, pkt_len);

                /* Extract the enhanced buffer descriptor */
                ebdp = NULL;
                if (fep->bufdesc_ex)
                        ebdp = (struct bufdesc_ex *)bdp;

                /* If this is a VLAN packet remove the VLAN Tag */
                vlan_packet_rcvd = false;
                if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
                        /* Push and remove the vlan tag */
                        struct vlan_hdr *vlan_header =
                                        (struct vlan_hdr *) (data + ETH_HLEN);
                        vlan_tag = ntohs(vlan_header->h_vlan_TCI);
                        pkt_len -= VLAN_HLEN;

                        vlan_packet_rcvd = true;
                }

                /* This does 16 byte alignment, exactly what we need.
                 * The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

                if (unlikely(!skb)) {
                        ndev->stats.rx_dropped++;
                } else {
                        int payload_offset = (2 * ETH_ALEN);
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4);      /* Make room */

                        /* Extract the frame data without the VLAN header. */
                        skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
                        if (vlan_packet_rcvd)
                                payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
                        skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
                                                       data + payload_offset,
                                                       pkt_len - 4 - (2 * ETH_ALEN));

                        skb->protocol = eth_type_trans(skb, ndev);

                        /* Get receive timestamp from the skb */
                        if (fep->hwts_rx_en && fep->bufdesc_ex) {
                                struct skb_shared_hwtstamps *shhwtstamps =
                                                            skb_hwtstamps(skb);
                                unsigned long flags;

                                memset(shhwtstamps, 0, sizeof(*shhwtstamps));

                                spin_lock_irqsave(&fep->tmreg_lock, flags);
                                shhwtstamps->hwtstamp = ns_to_ktime(
                                    timecounter_cyc2time(&fep->tc, ebdp->ts));
                                spin_unlock_irqrestore(&fep->tmreg_lock, flags);
                        }

                        if (fep->bufdesc_ex &&
                            (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
                                if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
                                        /* don't check it */
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                } else {
                                        skb_checksum_none_assert(skb);
                                }
                        }

                        /* Handle received VLAN packets */
                        if (vlan_packet_rcvd)
                                __vlan_hwaccel_put_tag(skb,
                                                       htons(ETH_P_8021Q),
                                                       vlan_tag);

                        napi_gro_receive(&fep->napi, skb);
                }

                dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
                bdp->cbd_sc = status;

                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        ebdp->cbd_esc = BD_ENET_RX_INT;
                        ebdp->cbd_prot = 0;
                        ebdp->cbd_bdu = 0;
                }

                /* Update BD pointer to next entry */
                bdp = fec_enet_get_nextdesc(bdp, fep);

                /* Doing this here will keep the FEC running while we process
                 * incoming frames.  On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                writel(0, fep->hwp + FEC_R_DES_ACTIVE);
        }
        fep->cur_rx = bdp;

        return pkt_received;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
        uint int_events;
        irqreturn_t ret = IRQ_NONE;

        do {
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events, fep->hwp + FEC_IEVENT);

                if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                        ret = IRQ_HANDLED;

                        /* Disable the RX interrupt */
                        if (napi_schedule_prep(&fep->napi)) {
                                writel(FEC_RX_DISABLED_IMASK,
                                        fep->hwp + FEC_IMASK);
                                __napi_schedule(&fep->napi);
                        }
                }

                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        complete(&fep->mdio_done);
                }
        } while (int_events);

        return ret;
}

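/*
 * NAPI poll handler: service the RX ring within the given budget, reap
 * completed TX descriptors, and re-enable the RX interrupt once the
 * poll finishes under budget.
 */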
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);

        fec_enet_tx(ndev);

        if (pkts < budget) {
                napi_complete(napi);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
        return pkts;
}

/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
        unsigned char *iap, tmpaddr[ETH_ALEN];

        /*
         * try to get mac address in the following order:
         *
         * 1) module parameter via kernel command line in form
         *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
         */
        iap = macaddr;

        /*
         * 2) from device tree data
         */
        if (!is_valid_ether_addr(iap)) {
                struct device_node *np = fep->pdev->dev.of_node;
                if (np) {
                        const char *mac = of_get_mac_address(np);
                        if (mac)
                                iap = (unsigned char *) mac;
                }
        }

        /*
         * 3) from flash or fuse (via platform data)
         */
        if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
                if (FEC_FLASHMAC)
                        iap = (unsigned char *)FEC_FLASHMAC;
#else
                if (pdata)
                        iap = (unsigned char *)&pdata->mac;
#endif
        }

        /*
         * 4) FEC mac registers set by bootloader
         */
        if (!is_valid_ether_addr(iap)) {
                *((__be32 *) &tmpaddr[0]) =
                        cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
                *((__be16 *) &tmpaddr[4]) =
                        cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
                iap = &tmpaddr[0];
        }

        /*
         * 5) random mac address
         */
        if (!is_valid_ether_addr(iap)) {
                /* Report it and use a random ethernet address instead */
                netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
                eth_hw_addr_random(ndev);
                netdev_info(ndev, "Using random MAC address: %pM\n",
                            ndev->dev_addr);
                return;
        }

        memcpy(ndev->dev_addr, iap, ETH_ALEN);

        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
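/*
 * Called by the PHY state machine on link changes: restart the MAC when
 * link, speed or duplex changes while up, and stop it when the link
 * goes down.
 */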
static void fec_enet_adjust_link(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = fep->phy_dev;
        int status_change = 0;

        /* Prevent a state halted on mii error */
        if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
                phy_dev->state = PHY_RESUMING;
                return;
        }

        if (phy_dev->link) {
                if (!fep->link) {
                        fep->link = phy_dev->link;
                        status_change = 1;
                }

                if (fep->full_duplex != phy_dev->duplex)
                        status_change = 1;

                if (phy_dev->speed != fep->speed) {
                        fep->speed = phy_dev->speed;
                        status_change = 1;
                }

                /* if any of the above changed restart the FEC */
                if (status_change)
                        fec_restart(ndev, phy_dev->duplex);
        } else {
                if (fep->link) {
                        fec_stop(ndev);
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
        }

        if (status_change)
                phy_print_status(phy_dev);
}

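/*
 * MDIO read/write: compose a management frame from the FEC_MMFR_*
 * fields, write it to FEC_MII_DATA, and wait for the MII event
 * interrupt (FEC_ENET_MII) to complete fep->mdio_done.
 */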
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct fec_enet_private *fep = bus->priv;
        unsigned long time_left;

        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);

        /* start a read op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
                FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
                FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

        /* wait for end of transfer */
        time_left = wait_for_completion_timeout(&fep->mdio_done,
                        usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO read timeout\n");
                return -ETIMEDOUT;
        }

        /* return value */
        return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
{
        struct fec_enet_private *fep = bus->priv;
        unsigned long time_left;

        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);

        /* start a write op */
        writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
                FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
                FEC_MMFR_TA | FEC_MMFR_DATA(value),
                fep->hwp + FEC_MII_DATA);

        /* wait for end of transfer */
        time_left = wait_for_completion_timeout(&fep->mdio_done,
                        usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO write timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct phy_device *phy_dev = NULL;
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
        int phy_id;
        int dev_id = fep->dev_id;

        fep->phy_dev = NULL;

        /* check for attached phy */
        for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
                if ((fep->mii_bus->phy_mask & (1 << phy_id)))
                        continue;
                if (fep->mii_bus->phy_map[phy_id] == NULL)
                        continue;
                if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
                        continue;
                if (dev_id--)
                        continue;
                strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
                break;
        }

        if (phy_id >= PHY_MAX_ADDR) {
                netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
                strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }

        snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
        phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
                              fep->phy_interface);
        if (IS_ERR(phy_dev)) {
                netdev_err(ndev, "could not attach to PHY\n");
                return PTR_ERR(phy_dev);
        }

        /* mask with MAC supported features */
        if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
                phy_dev->supported &= PHY_GBIT_FEATURES;
#if !defined(CONFIG_M5272)
                phy_dev->supported |= SUPPORTED_Pause;
#endif
        } else
                phy_dev->supported &= PHY_BASIC_FEATURES;

        phy_dev->advertising = phy_dev->supported;

        fep->phy_dev = phy_dev;
        fep->link = 0;
        fep->full_duplex = 0;

        netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
                    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                    fep->phy_dev->irq);

        return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
        static struct mii_bus *fec0_mii_bus;
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        int err = -ENXIO, i;

        /*
         * The dual fec interfaces are not equivalent with enet-mac.
         * Here are the differences:
         *
         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
         *  - fec0 acts as the 1588 time master while fec1 is slave
         *  - external phys can only be configured by fec0
         *
         * That is to say fec1 cannot work independently. It only works
         * when fec0 is working. The reason behind this design is that the
         * second interface is added primarily for Switch mode.
         *
         * Because of the last point above, both phys are attached on fec0
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
        if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                if (mii_cnt && fec0_mii_bus) {
                        fep->mii_bus = fec0_mii_bus;
                        mii_cnt++;
                        return 0;
                }
                return -ENOENT;
        }

        fep->mii_timeout = 0;

        /*
         * Set MII speed (MDC) to 2.5 MHz.
         *
         * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
         * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
         * reference manual has an error on this, which is fixed in the
         * i.MX6Q documentation.
         */
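        /*
         * Worked example (assuming a 66 MHz AHB clock, for illustration
         * only): DIV_ROUND_UP(66000000, 5000000) = 14; on ENET-MAC this
         * is decremented to 13, so MDC = 66 MHz / ((13 + 1) x 2),
         * roughly 2.36 MHz, just under the 2.5 MHz limit.
         */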
        fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fep->phy_speed--;
        fep->phy_speed <<= 1;
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

        fep->mii_bus = mdiobus_alloc();
        if (fep->mii_bus == NULL) {
                err = -ENOMEM;
                goto err_out;
        }

        fep->mii_bus->name = "fec_enet_mii_bus";
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
        snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                pdev->name, fep->dev_id + 1);
        fep->mii_bus->priv = fep;
        fep->mii_bus->parent = &pdev->dev;

        fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
        if (!fep->mii_bus->irq) {
                err = -ENOMEM;
                goto err_out_free_mdiobus;
        }

        for (i = 0; i < PHY_MAX_ADDR; i++)
                fep->mii_bus->irq[i] = PHY_POLL;

        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;

        mii_cnt++;

        /* save fec0 mii_bus */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fec0_mii_bus = fep->mii_bus;

        return 0;

err_out_free_mdio_irq:
        kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
        mdiobus_free(fep->mii_bus);
err_out:
        return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
        if (--mii_cnt == 0) {
                mdiobus_unregister(fep->mii_bus);
                kfree(fep->mii_bus->irq);
                mdiobus_free(fep->mii_bus);
        }
}
1423
1424static int fec_enet_get_settings(struct net_device *ndev,
1425                                  struct ethtool_cmd *cmd)
1426{
1427        struct fec_enet_private *fep = netdev_priv(ndev);
1428        struct phy_device *phydev = fep->phy_dev;
1429
1430        if (!phydev)
1431                return -ENODEV;
1432
1433        return phy_ethtool_gset(phydev, cmd);
1434}
1435
1436static int fec_enet_set_settings(struct net_device *ndev,
1437                                 struct ethtool_cmd *cmd)
1438{
1439        struct fec_enet_private *fep = netdev_priv(ndev);
1440        struct phy_device *phydev = fep->phy_dev;
1441
1442        if (!phydev)
1443                return -ENODEV;
1444
1445        return phy_ethtool_sset(phydev, cmd);
1446}
1447
1448static void fec_enet_get_drvinfo(struct net_device *ndev,
1449                                 struct ethtool_drvinfo *info)
1450{
1451        struct fec_enet_private *fep = netdev_priv(ndev);
1452
1453        strlcpy(info->driver, fep->pdev->dev.driver->name,
1454                sizeof(info->driver));
1455        strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
1456        strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
1457}
1458
1459static int fec_enet_get_ts_info(struct net_device *ndev,
1460                                struct ethtool_ts_info *info)
1461{
1462        struct fec_enet_private *fep = netdev_priv(ndev);
1463
1464        if (fep->bufdesc_ex) {
1465
1466                info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1467                                        SOF_TIMESTAMPING_RX_SOFTWARE |
1468                                        SOF_TIMESTAMPING_SOFTWARE |
1469                                        SOF_TIMESTAMPING_TX_HARDWARE |
1470                                        SOF_TIMESTAMPING_RX_HARDWARE |
1471                                        SOF_TIMESTAMPING_RAW_HARDWARE;
1472                if (fep->ptp_clock)
1473                        info->phc_index = ptp_clock_index(fep->ptp_clock);
1474                else
1475                        info->phc_index = -1;
1476
1477                info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1478                                 (1 << HWTSTAMP_TX_ON);
1479
1480                info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1481                                   (1 << HWTSTAMP_FILTER_ALL);
1482                return 0;
1483        } else {
1484                return ethtool_op_get_ts_info(ndev, info);
1485        }
1486}
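    /*
     * Userspace discovers these capabilities through the standard
     * ETHTOOL_GET_TS_INFO request, e.g. "ethtool -T <iface>", which
     * reports the SOF_TIMESTAMPING_* flags, tx_types, rx_filters and
     * PHC index filled in above.
     */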
1487
1488#if !defined(CONFIG_M5272)
1489
1490static void fec_enet_get_pauseparam(struct net_device *ndev,
1491                                    struct ethtool_pauseparam *pause)
1492{
1493        struct fec_enet_private *fep = netdev_priv(ndev);
1494
1495        pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
1496        pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
1497        pause->rx_pause = pause->tx_pause;
1498}
1499
1500static int fec_enet_set_pauseparam(struct net_device *ndev,
1501                                   struct ethtool_pauseparam *pause)
1502{
1503        struct fec_enet_private *fep = netdev_priv(ndev);
1504
1505        if (pause->tx_pause != pause->rx_pause) {
1506                netdev_info(ndev,
1507                        "hardware only supports enabling/disabling tx and rx together\n");
1508                return -EINVAL;
1509        }
1510
1511        fep->pause_flag = 0;
1512
1513        /* tx pause must be the same as rx pause */
1514        fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
1515        fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1516
1517        if (pause->rx_pause || pause->autoneg) {
1518                fep->phy_dev->supported |= ADVERTISED_Pause;
1519                fep->phy_dev->advertising |= ADVERTISED_Pause;
1520        } else {
1521                fep->phy_dev->supported &= ~ADVERTISED_Pause;
1522                fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1523        }
1524
1525        if (pause->autoneg) {
1526                if (netif_running(ndev))
1527                        fec_stop(ndev);
1528                phy_start_aneg(fep->phy_dev);
1529        }
1530        if (netif_running(ndev))
1531                fec_restart(ndev, 0);
1532
1533        return 0;
1534}
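    /*
     * These settings map to the standard ethtool pause interface, e.g.
     * "ethtool -A eth0 autoneg on rx on tx on" ("eth0" is a placeholder
     * name).  rx and tx must be set to the same value, as enforced
     * above.
     */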
1535
1536static const struct fec_stat {
1537        char name[ETH_GSTRING_LEN];
1538        u16 offset;
1539} fec_stats[] = {
1540        /* RMON TX */
1541        { "tx_dropped", RMON_T_DROP },
1542        { "tx_packets", RMON_T_PACKETS },
1543        { "tx_broadcast", RMON_T_BC_PKT },
1544        { "tx_multicast", RMON_T_MC_PKT },
1545        { "tx_crc_errors", RMON_T_CRC_ALIGN },
1546        { "tx_undersize", RMON_T_UNDERSIZE },
1547        { "tx_oversize", RMON_T_OVERSIZE },
1548        { "tx_fragment", RMON_T_FRAG },
1549        { "tx_jabber", RMON_T_JAB },
1550        { "tx_collision", RMON_T_COL },
1551        { "tx_64byte", RMON_T_P64 },
1552        { "tx_65to127byte", RMON_T_P65TO127 },
1553        { "tx_128to255byte", RMON_T_P128TO255 },
1554        { "tx_256to511byte", RMON_T_P256TO511 },
1555        { "tx_512to1023byte", RMON_T_P512TO1023 },
1556        { "tx_1024to2047byte", RMON_T_P1024TO2047 },
1557        { "tx_GTE2048byte", RMON_T_P_GTE2048 },
1558        { "tx_octets", RMON_T_OCTETS },
1559
1560        /* IEEE TX */
1561        { "IEEE_tx_drop", IEEE_T_DROP },
1562        { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
1563        { "IEEE_tx_1col", IEEE_T_1COL },
1564        { "IEEE_tx_mcol", IEEE_T_MCOL },
1565        { "IEEE_tx_def", IEEE_T_DEF },
1566        { "IEEE_tx_lcol", IEEE_T_LCOL },
1567        { "IEEE_tx_excol", IEEE_T_EXCOL },
1568        { "IEEE_tx_macerr", IEEE_T_MACERR },
1569        { "IEEE_tx_cserr", IEEE_T_CSERR },
1570        { "IEEE_tx_sqe", IEEE_T_SQE },
1571        { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
1572        { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
1573
1574        /* RMON RX */
1575        { "rx_packets", RMON_R_PACKETS },
1576        { "rx_broadcast", RMON_R_BC_PKT },
1577        { "rx_multicast", RMON_R_MC_PKT },
1578        { "rx_crc_errors", RMON_R_CRC_ALIGN },
1579        { "rx_undersize", RMON_R_UNDERSIZE },
1580        { "rx_oversize", RMON_R_OVERSIZE },
1581        { "rx_fragment", RMON_R_FRAG },
1582        { "rx_jabber", RMON_R_JAB },
1583        { "rx_64byte", RMON_R_P64 },
1584        { "rx_65to127byte", RMON_R_P65TO127 },
1585        { "rx_128to255byte", RMON_R_P128TO255 },
1586        { "rx_256to511byte", RMON_R_P256TO511 },
1587        { "rx_512to1023byte", RMON_R_P512TO1023 },
1588        { "rx_1024to2047byte", RMON_R_P1024TO2047 },
1589        { "rx_GTE2048byte", RMON_R_P_GTE2048 },
1590        { "rx_octets", RMON_R_OCTETS },
1591
1592        /* IEEE RX */
1593        { "IEEE_rx_drop", IEEE_R_DROP },
1594        { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
1595        { "IEEE_rx_crc", IEEE_R_CRC },
1596        { "IEEE_rx_align", IEEE_R_ALIGN },
1597        { "IEEE_rx_macerr", IEEE_R_MACERR },
1598        { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
1599        { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1600};
1601
1602static void fec_enet_get_ethtool_stats(struct net_device *dev,
1603        struct ethtool_stats *stats, u64 *data)
1604{
1605        struct fec_enet_private *fep = netdev_priv(dev);
1606        int i;
1607
1608        for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1609                data[i] = readl(fep->hwp + fec_stats[i].offset);
1610}
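    /*
     * The counters are read straight from the hardware statistics
     * registers and can be dumped from userspace with
     * "ethtool -S <iface>".
     */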
1611
1612static void fec_enet_get_strings(struct net_device *netdev,
1613        u32 stringset, u8 *data)
1614{
1615        int i;
1616        switch (stringset) {
1617        case ETH_SS_STATS:
1618                for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1619                        memcpy(data + i * ETH_GSTRING_LEN,
1620                                fec_stats[i].name, ETH_GSTRING_LEN);
1621                break;
1622        }
1623}
1624
1625static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1626{
1627        switch (sset) {
1628        case ETH_SS_STATS:
1629                return ARRAY_SIZE(fec_stats);
1630        default:
1631                return -EOPNOTSUPP;
1632        }
1633}
1634#endif /* !defined(CONFIG_M5272) */
1635
1636static int fec_enet_nway_reset(struct net_device *dev)
1637{
1638        struct fec_enet_private *fep = netdev_priv(dev);
1639        struct phy_device *phydev = fep->phy_dev;
1640
1641        if (!phydev)
1642                return -ENODEV;
1643
1644        return genphy_restart_aneg(phydev);
1645}
1646
1647static const struct ethtool_ops fec_enet_ethtool_ops = {
1648#if !defined(CONFIG_M5272)
1649        .get_pauseparam         = fec_enet_get_pauseparam,
1650        .set_pauseparam         = fec_enet_set_pauseparam,
1651#endif
1652        .get_settings           = fec_enet_get_settings,
1653        .set_settings           = fec_enet_set_settings,
1654        .get_drvinfo            = fec_enet_get_drvinfo,
1655        .get_link               = ethtool_op_get_link,
1656        .get_ts_info            = fec_enet_get_ts_info,
1657        .nway_reset             = fec_enet_nway_reset,
1658#ifndef CONFIG_M5272
1659        .get_ethtool_stats      = fec_enet_get_ethtool_stats,
1660        .get_strings            = fec_enet_get_strings,
1661        .get_sset_count         = fec_enet_get_sset_count,
1662#endif
1663};
1664
1665static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1666{
1667        struct fec_enet_private *fep = netdev_priv(ndev);
1668        struct phy_device *phydev = fep->phy_dev;
1669
1670        if (!netif_running(ndev))
1671                return -EINVAL;
1672
1673        if (!phydev)
1674                return -ENODEV;
1675
1676        if (fep->bufdesc_ex) {
1677                if (cmd == SIOCSHWTSTAMP)
1678                        return fec_ptp_set(ndev, rq);
1679                if (cmd == SIOCGHWTSTAMP)
1680                        return fec_ptp_get(ndev, rq);
1681        }
1682
1683        return phy_mii_ioctl(phydev, rq, cmd);
1684}
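#if 0
    /*
     * Userspace sketch (illustration only, not compiled into the driver):
     * enabling hardware timestamping through the SIOCSHWTSTAMP path
     * handled above.  "eth0" is a placeholder interface name and sock is
     * any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0).
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    static int enable_hw_tstamp(int sock)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;

            memset(&cfg, 0, sizeof(cfg));
            cfg.tx_type = HWTSTAMP_TX_ON;           /* timestamp all tx packets */
            cfg.rx_filter = HWTSTAMP_FILTER_ALL;    /* timestamp all rx packets */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            return ioctl(sock, SIOCSHWTSTAMP, &ifr);        /* 0 on success */
    }
#endif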
1685
1686static void fec_enet_free_buffers(struct net_device *ndev)
1687{
1688        struct fec_enet_private *fep = netdev_priv(ndev);
1689        unsigned int i;
1690        struct sk_buff *skb;
1691        struct bufdesc  *bdp;
1692
1693        bdp = fep->rx_bd_base;
1694        for (i = 0; i < fep->rx_ring_size; i++) {
1695                skb = fep->rx_skbuff[i];
1696
1697                if (bdp->cbd_bufaddr)
1698                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1699                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1700                if (skb)
1701                        dev_kfree_skb(skb);
1702                bdp = fec_enet_get_nextdesc(bdp, fep);
1703        }
1704
1705        bdp = fep->tx_bd_base;
1706        for (i = 0; i < fep->tx_ring_size; i++)
1707                kfree(fep->tx_bounce[i]);
1708}
1709
1710static int fec_enet_alloc_buffers(struct net_device *ndev)
1711{
1712        struct fec_enet_private *fep = netdev_priv(ndev);
1713        unsigned int i;
1714        struct sk_buff *skb;
1715        struct bufdesc  *bdp;
1716
1717        bdp = fep->rx_bd_base;
1718        for (i = 0; i < fep->rx_ring_size; i++) {
1719                skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1720                if (!skb) {
1721                        fec_enet_free_buffers(ndev);
1722                        return -ENOMEM;
1723                }
1724                fep->rx_skbuff[i] = skb;
1725
1726                bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1727                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1728                if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1729                        fec_enet_free_buffers(ndev);
1730                        if (net_ratelimit())
1731                                netdev_err(ndev, "Rx DMA memory map failed\n");
1732                        return -ENOMEM;
1733                }
1734                bdp->cbd_sc = BD_ENET_RX_EMPTY;
1735
1736                if (fep->bufdesc_ex) {
1737                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1738                        ebdp->cbd_esc = BD_ENET_RX_INT;
1739                }
1740
1741                bdp = fec_enet_get_nextdesc(bdp, fep);
1742        }
1743
1744        /* Set the last buffer to wrap. */
1745        bdp = fec_enet_get_prevdesc(bdp, fep);
1746        bdp->cbd_sc |= BD_SC_WRAP;
1747
1748        bdp = fep->tx_bd_base;
1749        for (i = 0; i < fep->tx_ring_size; i++) {
1750                fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
                    if (!fep->tx_bounce[i]) {
                            fec_enet_free_buffers(ndev);
                            return -ENOMEM;
                    }
1751
1752                bdp->cbd_sc = 0;
1753                bdp->cbd_bufaddr = 0;
1754
1755                if (fep->bufdesc_ex) {
1756                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1757                        ebdp->cbd_esc = BD_ENET_TX_INT;
1758                }
1759
1760                bdp = fec_enet_get_nextdesc(bdp, fep);
1761        }
1762
1763        /* Set the last buffer to wrap. */
1764        bdp = fec_enet_get_prevdesc(bdp, fep);
1765        bdp->cbd_sc |= BD_SC_WRAP;
1766
1767        return 0;
1768}
1769
1770static int
1771fec_enet_open(struct net_device *ndev)
1772{
1773        struct fec_enet_private *fep = netdev_priv(ndev);
1774        int ret;
1775
1776        /* I should reset the ring buffers here, but I don't yet know
1777         * a simple way to do that.
1778         */
1779
1780        ret = fec_enet_alloc_buffers(ndev);
1781        if (ret)
1782                return ret;
1783
1784        /* Probe and connect to the PHY when opening the interface */
1785        ret = fec_enet_mii_probe(ndev);
1786        if (ret) {
1787                fec_enet_free_buffers(ndev);
1788                return ret;
1789        }
1790
1791        napi_enable(&fep->napi);
1792        phy_start(fep->phy_dev);
1793        netif_start_queue(ndev);
1794        fep->opened = 1;
1795        return 0;
1796}
1797
1798static int
1799fec_enet_close(struct net_device *ndev)
1800{
1801        struct fec_enet_private *fep = netdev_priv(ndev);
1802
1803        /* Don't know what to do yet. */
1804        napi_disable(&fep->napi);
1805        fep->opened = 0;
1806        netif_stop_queue(ndev);
1807        fec_stop(ndev);
1808
1809        if (fep->phy_dev) {
1810                phy_stop(fep->phy_dev);
1811                phy_disconnect(fep->phy_dev);
1812        }
1813
1814        fec_enet_free_buffers(ndev);
1815
1816        return 0;
1817}
1818
1819/* Set or clear the multicast filter for this adaptor.
1820 * Skeleton taken from sunlance driver.
1821 * The CPM Ethernet implementation allows Multicast as well as individual
1822 * MAC address filtering.  Some of the drivers check to make sure it is
1823 * a group multicast address, and discard those that are not.  I guess I
1824 * will do the same for now, but just remove the test if you want
1825 * individual filtering as well (do the upper net layers want or support
1826 * this kind of feature?).
1827 */
1828
1829#define HASH_BITS       6               /* #bits in hash */
1830#define CRC32_POLY      0xEDB88320
1831
1832static void set_multicast_list(struct net_device *ndev)
1833{
1834        struct fec_enet_private *fep = netdev_priv(ndev);
1835        struct netdev_hw_addr *ha;
1836        unsigned int i, bit, data, crc, tmp;
1837        unsigned char hash;
1838
1839        if (ndev->flags & IFF_PROMISC) {
1840                tmp = readl(fep->hwp + FEC_R_CNTRL);
1841                tmp |= 0x8;
1842                writel(tmp, fep->hwp + FEC_R_CNTRL);
1843                return;
1844        }
1845
1846        tmp = readl(fep->hwp + FEC_R_CNTRL);
1847        tmp &= ~0x8;
1848        writel(tmp, fep->hwp + FEC_R_CNTRL);
1849
1850        if (ndev->flags & IFF_ALLMULTI) {
1851                /* Catch all multicast addresses, so set the
1852                 * filter to all 1's
1853                 */
1854                writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1855                writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1856
1857                return;
1858        }
1859
1860        /* Clear the filter and add the addresses to the hash register
1861         */
1862        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1863        writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1864
1865        netdev_for_each_mc_addr(ha, ndev) {
1866                /* calculate the CRC32 value of the MAC address */
1867                crc = 0xffffffff;
1868
1869                for (i = 0; i < ndev->addr_len; i++) {
1870                        data = ha->addr[i];
1871                        for (bit = 0; bit < 8; bit++, data >>= 1) {
1872                                crc = (crc >> 1) ^
1873                                (((crc ^ data) & 1) ? CRC32_POLY : 0);
1874                        }
1875                }
1876
1877                /* only the upper 6 bits (HASH_BITS) are used,
1878                 * which point to a specific bit in the hash registers
1879                 */
1880                hash = (crc >> (32 - HASH_BITS)) & 0x3f;
1881
1882                if (hash > 31) {
1883                        tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1884                        tmp |= 1 << (hash - 32);
1885                        writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1886                } else {
1887                        tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1888                        tmp |= 1 << hash;
1889                        writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1890                }
1891        }
1892}
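#if 0
    /*
     * Standalone sketch (illustration only, not compiled into the
     * driver): computing the 6-bit hash index exactly as
     * set_multicast_list() does, here for the all-hosts IPv4 multicast
     * address 01:00:5e:00:00:01.
     */
    #include <stdio.h>

    static unsigned int fec_mc_hash(const unsigned char *addr, int len)
    {
            unsigned int crc = 0xffffffff, data;
            int i, bit;

            for (i = 0; i < len; i++) {
                    data = addr[i];
                    for (bit = 0; bit < 8; bit++, data >>= 1)
                            crc = (crc >> 1) ^
                                    (((crc ^ data) & 1) ? 0xEDB88320 : 0);
            }
            /* the upper HASH_BITS of the CRC select the filter bit */
            return (crc >> 26) & 0x3f;
    }

    int main(void)
    {
            const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            printf("hash index: %u\n", fec_mc_hash(mc, 6));
            return 0;
    }
#endif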
1893
1894/* Program a new MAC address into the hardware. */
1895static int
1896fec_set_mac_address(struct net_device *ndev, void *p)
1897{
1898        struct fec_enet_private *fep = netdev_priv(ndev);
1899        struct sockaddr *addr = p;
1900
1901        if (addr) {
1902                if (!is_valid_ether_addr(addr->sa_data))
1903                        return -EADDRNOTAVAIL;
1904                memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1905        }
1906
1907        writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1908                (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
1909                fep->hwp + FEC_ADDR_LOW);
1910        writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
1911                fep->hwp + FEC_ADDR_HIGH);
1912        return 0;
1913}
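    /*
     * Example (sample address, for illustration only): for the MAC
     * address 00:04:9f:01:02:03 the writes above yield
     * FEC_ADDR_LOW = 0x00049f01 (bytes 0-3, most significant byte
     * first) and FEC_ADDR_HIGH = 0x02030000 (bytes 4-5 in the upper
     * halfword).
     */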
1914
1915#ifdef CONFIG_NET_POLL_CONTROLLER
1916/**
1917 * fec_poll_controller - FEC Poll controller function
1918 * @dev: The FEC network adapter
1919 *
1920 * Polled functionality used by netconsole and others in non-interrupt mode
1921 *
1922 */
1923static void fec_poll_controller(struct net_device *dev)
1924{
1925        int i;
1926        struct fec_enet_private *fep = netdev_priv(dev);
1927
1928        for (i = 0; i < FEC_IRQ_NUM; i++) {
1929                if (fep->irq[i] > 0) {
1930                        disable_irq(fep->irq[i]);
1931                        fec_enet_interrupt(fep->irq[i], dev);
1932                        enable_irq(fep->irq[i]);
1933                }
1934        }
1935}
1936#endif
1937
1938static int fec_set_features(struct net_device *netdev,
1939        netdev_features_t features)
1940{
1941        struct fec_enet_private *fep = netdev_priv(netdev);
1942        netdev_features_t changed = features ^ netdev->features;
1943
1944        netdev->features = features;
1945
1946        /* Receive checksum has been changed */
1947        if (changed & NETIF_F_RXCSUM) {
1948                if (features & NETIF_F_RXCSUM)
1949                        fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
1950                else
1951                        fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
1952
1953                if (netif_running(netdev)) {
1954                        fec_stop(netdev);
1955                        fec_restart(netdev, fep->phy_dev->duplex);
1956                        netif_wake_queue(netdev);
1957                } else {
1958                        fec_restart(netdev, fep->phy_dev->duplex);
1959                }
1960        }
1961
1962        return 0;
1963}
1964
1965static const struct net_device_ops fec_netdev_ops = {
1966        .ndo_open               = fec_enet_open,
1967        .ndo_stop               = fec_enet_close,
1968        .ndo_start_xmit         = fec_enet_start_xmit,
1969        .ndo_set_rx_mode        = set_multicast_list,
1970        .ndo_change_mtu         = eth_change_mtu,
1971        .ndo_validate_addr      = eth_validate_addr,
1972        .ndo_tx_timeout         = fec_timeout,
1973        .ndo_set_mac_address    = fec_set_mac_address,
1974        .ndo_do_ioctl           = fec_enet_ioctl,
1975#ifdef CONFIG_NET_POLL_CONTROLLER
1976        .ndo_poll_controller    = fec_poll_controller,
1977#endif
1978        .ndo_set_features       = fec_set_features,
1979};
1980
1981/*
1982 * XXX:  We need to clean up on failure exits here.
1983 */
1985static int fec_enet_init(struct net_device *ndev)
1986{
1987        struct fec_enet_private *fep = netdev_priv(ndev);
1988        const struct platform_device_id *id_entry =
1989                                platform_get_device_id(fep->pdev);
1990        struct bufdesc *cbd_base;
1991
1992        /* Allocate memory for buffer descriptors. */
1993        cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1994                                      GFP_KERNEL);
1995        if (!cbd_base)
1996                return -ENOMEM;
1997
1998        memset(cbd_base, 0, PAGE_SIZE);
1999
2000        fep->netdev = ndev;
2001
2002        /* Get the Ethernet address */
2003        fec_get_mac(ndev);
2004        /* make sure the MAC address we just acquired is programmed into the hw */
2005        fec_set_mac_address(ndev, NULL);
2006
2007        /* init the tx & rx ring size */
2008        fep->tx_ring_size = TX_RING_SIZE;
2009        fep->rx_ring_size = RX_RING_SIZE;
2010
2011        /* Set receive and transmit descriptor base. */
2012        fep->rx_bd_base = cbd_base;
2013        if (fep->bufdesc_ex)
2014                fep->tx_bd_base = (struct bufdesc *)
2015                        (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
2016        else
2017                fep->tx_bd_base = cbd_base + fep->rx_ring_size;
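            /*
             * The rings share the single page allocated above: rx
             * descriptors start at cbd_base and tx descriptors follow
             * immediately after the rx ring.  The cast only changes the
             * stride of the pointer arithmetic, since struct bufdesc_ex
             * is larger than struct bufdesc.
             */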
2018
2019        /* The FEC Ethernet specific entries in the device structure */
2020        ndev->watchdog_timeo = TX_TIMEOUT;
2021        ndev->netdev_ops = &fec_netdev_ops;
2022        ndev->ethtool_ops = &fec_enet_ethtool_ops;
2023
2024        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
2025        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
2026
2027        if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
2028                /* enable hw VLAN support */
2029                ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2030                ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2031        }
2032
2033        if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
2034                /* enable hw accelerator */
2035                ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2036                                | NETIF_F_RXCSUM);
2037                ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2038                                | NETIF_F_RXCSUM);
2039                fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
2040        }
2041
2042        fec_restart(ndev, 0);
2043
2044        return 0;
2045}
2046
2047#ifdef CONFIG_OF
2048static void fec_reset_phy(struct platform_device *pdev)
2049{
2050        int err, phy_reset;
2051        int msec = 1;
2052        struct device_node *np = pdev->dev.of_node;
2053
2054        if (!np)
2055                return;
2056
2057        of_property_read_u32(np, "phy-reset-duration", &msec);
2058        /* A sane reset duration should not be longer than 1s */
2059        if (msec > 1000)
2060                msec = 1;
2061
2062        phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2063        if (!gpio_is_valid(phy_reset))
2064                return;
2065
2066        err = devm_gpio_request_one(&pdev->dev, phy_reset,
2067                                    GPIOF_OUT_INIT_LOW, "phy-reset");
2068        if (err) {
2069                dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
2070                return;
2071        }
2072        msleep(msec);
2073        gpio_set_value(phy_reset, 1);
2074}
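    /*
     * Example (hypothetical board, for illustration only): the device
     * tree properties consumed above could look like
     *
     *      phy-reset-gpios = <&gpio1 2 0>;
     *      phy-reset-duration = <10>;      (milliseconds)
     */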
2075#else /* CONFIG_OF */
2076static void fec_reset_phy(struct platform_device *pdev)
2077{
2078        /*
2079         * In the case of a non-DT platform probe, the reset has
2080         * already been done by the machine code.
2081         */
2082}
2083#endif /* CONFIG_OF */
2084
2085static int
2086fec_probe(struct platform_device *pdev)
2087{
2088        struct fec_enet_private *fep;
2089        struct fec_platform_data *pdata;
2090        struct net_device *ndev;
2091        int i, irq, ret = 0;
2092        struct resource *r;
2093        const struct of_device_id *of_id;
2094        static int dev_id;
2095
2096        of_id = of_match_device(fec_dt_ids, &pdev->dev);
2097        if (of_id)
2098                pdev->id_entry = of_id->data;
2099
2100        /* Init network device */
2101        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
2102        if (!ndev)
2103                return -ENOMEM;
2104
2105        SET_NETDEV_DEV(ndev, &pdev->dev);
2106
2107        /* setup board info structure */
2108        fep = netdev_priv(ndev);
2109
2110#if !defined(CONFIG_M5272)
2111        /* enable pause frame autonegotiation by default */
2112        if (pdev->id_entry &&
2113            (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
2114                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
2115#endif
2116
2117        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2118        fep->hwp = devm_ioremap_resource(&pdev->dev, r);
2119        if (IS_ERR(fep->hwp)) {
2120                ret = PTR_ERR(fep->hwp);
2121                goto failed_ioremap;
2122        }
2123
2124        fep->pdev = pdev;
2125        fep->dev_id = dev_id++;
2126
2127        fep->bufdesc_ex = 0;
2128
2129        platform_set_drvdata(pdev, ndev);
2130
2131        ret = of_get_phy_mode(pdev->dev.of_node);
2132        if (ret < 0) {
2133                pdata = dev_get_platdata(&pdev->dev);
2134                if (pdata)
2135                        fep->phy_interface = pdata->phy;
2136                else
2137                        fep->phy_interface = PHY_INTERFACE_MODE_MII;
2138        } else {
2139                fep->phy_interface = ret;
2140        }
2141
2142        fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2143        if (IS_ERR(fep->clk_ipg)) {
2144                ret = PTR_ERR(fep->clk_ipg);
2145                goto failed_clk;
2146        }
2147
2148        fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2149        if (IS_ERR(fep->clk_ahb)) {
2150                ret = PTR_ERR(fep->clk_ahb);
2151                goto failed_clk;
2152        }
2153
2154        /* enet_out is optional, depends on board */
2155        fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
2156        if (IS_ERR(fep->clk_enet_out))
2157                fep->clk_enet_out = NULL;
2158
2159        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
2160        fep->bufdesc_ex =
2161                pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
2162        if (IS_ERR(fep->clk_ptp)) {
2163                fep->clk_ptp = NULL;
2164                fep->bufdesc_ex = 0;
2165        }
2166
2167        ret = clk_prepare_enable(fep->clk_ahb);
2168        if (ret)
2169                goto failed_clk;
2170
2171        ret = clk_prepare_enable(fep->clk_ipg);
2172        if (ret)
2173                goto failed_clk_ipg;
2174
2175        if (fep->clk_enet_out) {
2176                ret = clk_prepare_enable(fep->clk_enet_out);
2177                if (ret)
2178                        goto failed_clk_enet_out;
2179        }
2180
2181        if (fep->clk_ptp) {
2182                ret = clk_prepare_enable(fep->clk_ptp);
2183                if (ret)
2184                        goto failed_clk_ptp;
2185        }
2186
2187        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2188        if (!IS_ERR(fep->reg_phy)) {
2189                ret = regulator_enable(fep->reg_phy);
2190                if (ret) {
2191                        dev_err(&pdev->dev,
2192                                "Failed to enable phy regulator: %d\n", ret);
2193                        goto failed_regulator;
2194                }
2195        } else {
2196                fep->reg_phy = NULL;
2197        }
2198
2199        fec_reset_phy(pdev);
2200
2201        if (fep->bufdesc_ex)
2202                fec_ptp_init(pdev);
2203
2204        ret = fec_enet_init(ndev);
2205        if (ret)
2206                goto failed_init;
2207
2208        for (i = 0; i < FEC_IRQ_NUM; i++) {
2209                irq = platform_get_irq(pdev, i);
2210                if (irq < 0) {
2211                        if (i)
2212                                break;
2213                        ret = irq;
2214                        goto failed_irq;
2215                }
2216                ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
2217                                       0, pdev->name, ndev);
2218                if (ret)
2219                        goto failed_irq;
2220        }
2221
2222        ret = fec_enet_mii_init(pdev);
2223        if (ret)
2224                goto failed_mii_init;
2225
2226        /* Carrier starts down, phylib will bring it up */
2227        netif_carrier_off(ndev);
2228
2229        ret = register_netdev(ndev);
2230        if (ret)
2231                goto failed_register;
2232
2233        if (fep->bufdesc_ex && fep->ptp_clock)
2234                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
2235
2236        INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
2237        return 0;
2238
2239failed_register:
2240        fec_enet_mii_remove(fep);
2241failed_mii_init:
2242failed_irq:
2243failed_init:
2244        if (fep->reg_phy)
2245                regulator_disable(fep->reg_phy);
2246failed_regulator:
2247        if (fep->clk_ptp)
2248                clk_disable_unprepare(fep->clk_ptp);
2249failed_clk_ptp:
2250        if (fep->clk_enet_out)
2251                clk_disable_unprepare(fep->clk_enet_out);
2252failed_clk_enet_out:
2253        clk_disable_unprepare(fep->clk_ipg);
2254failed_clk_ipg:
2255        clk_disable_unprepare(fep->clk_ahb);
2256failed_clk:
2257failed_ioremap:
2258        free_netdev(ndev);
2259
2260        return ret;
2261}
2262
2263static int
2264fec_drv_remove(struct platform_device *pdev)
2265{
2266        struct net_device *ndev = platform_get_drvdata(pdev);
2267        struct fec_enet_private *fep = netdev_priv(ndev);
2268
2269        cancel_delayed_work_sync(&(fep->delay_work.delay_work));
2270        unregister_netdev(ndev);
2271        fec_enet_mii_remove(fep);
2272        del_timer_sync(&fep->time_keep);
2273        if (fep->reg_phy)
2274                regulator_disable(fep->reg_phy);
2275        if (fep->clk_ptp)
2276                clk_disable_unprepare(fep->clk_ptp);
2277        if (fep->ptp_clock)
2278                ptp_clock_unregister(fep->ptp_clock);
2279        if (fep->clk_enet_out)
2280                clk_disable_unprepare(fep->clk_enet_out);
2281        clk_disable_unprepare(fep->clk_ipg);
2282        clk_disable_unprepare(fep->clk_ahb);
2283        free_netdev(ndev);
2284
2285        return 0;
2286}
2287
2288#ifdef CONFIG_PM_SLEEP
2289static int
2290fec_suspend(struct device *dev)
2291{
2292        struct net_device *ndev = dev_get_drvdata(dev);
2293        struct fec_enet_private *fep = netdev_priv(ndev);
2294
2295        if (netif_running(ndev)) {
2296                fec_stop(ndev);
2297                netif_device_detach(ndev);
2298        }
2299        if (fep->clk_ptp)
2300                clk_disable_unprepare(fep->clk_ptp);
2301        if (fep->clk_enet_out)
2302                clk_disable_unprepare(fep->clk_enet_out);
2303        clk_disable_unprepare(fep->clk_ipg);
2304        clk_disable_unprepare(fep->clk_ahb);
2305
2306        if (fep->reg_phy)
2307                regulator_disable(fep->reg_phy);
2308
2309        return 0;
2310}
2311
2312static int
2313fec_resume(struct device *dev)
2314{
2315        struct net_device *ndev = dev_get_drvdata(dev);
2316        struct fec_enet_private *fep = netdev_priv(ndev);
2317        int ret;
2318
2319        if (fep->reg_phy) {
2320                ret = regulator_enable(fep->reg_phy);
2321                if (ret)
2322                        return ret;
2323        }
2324
2325        ret = clk_prepare_enable(fep->clk_ahb);
2326        if (ret)
2327                goto failed_clk_ahb;
2328
2329        ret = clk_prepare_enable(fep->clk_ipg);
2330        if (ret)
2331                goto failed_clk_ipg;
2332
2333        if (fep->clk_enet_out) {
2334                ret = clk_prepare_enable(fep->clk_enet_out);
2335                if (ret)
2336                        goto failed_clk_enet_out;
2337        }
2338
2339        if (fep->clk_ptp) {
2340                ret = clk_prepare_enable(fep->clk_ptp);
2341                if (ret)
2342                        goto failed_clk_ptp;
2343        }
2344
2345        if (netif_running(ndev)) {
2346                fec_restart(ndev, fep->full_duplex);
2347                netif_device_attach(ndev);
2348        }
2349
2350        return 0;
2351
2352failed_clk_ptp:
2353        if (fep->clk_enet_out)
2354                clk_disable_unprepare(fep->clk_enet_out);
2355failed_clk_enet_out:
2356        clk_disable_unprepare(fep->clk_ipg);
2357failed_clk_ipg:
2358        clk_disable_unprepare(fep->clk_ahb);
2359failed_clk_ahb:
2360        if (fep->reg_phy)
2361                regulator_disable(fep->reg_phy);
2362        return ret;
2363}
2364#endif /* CONFIG_PM_SLEEP */
2365
2366static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
2367
2368static struct platform_driver fec_driver = {
2369        .driver = {
2370                .name   = DRIVER_NAME,
2371                .owner  = THIS_MODULE,
2372                .pm     = &fec_pm_ops,
2373                .of_match_table = fec_dt_ids,
2374        },
2375        .id_table = fec_devtype,
2376        .probe  = fec_probe,
2377        .remove = fec_drv_remove,
2378};
2379
2380module_platform_driver(fec_driver);
2381
2382MODULE_ALIAS("platform:"DRIVER_NAME);
2383MODULE_LICENSE("GPL");
2384