linux/drivers/net/ethernet/freescale/fec_main.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME     "fec"

#define FEC_ENET_GET_QUEUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
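
/* FEC_ENET_GET_QUEUE() maps a work_tx/work_rx bit number to the hardware
 * queue it names: bit 0 -> queue 1 (class A), bit 1 -> queue 2 (class B),
 * anything else -> queue 0 (best effort).  Walking the set bits in
 * ascending order therefore services class A first, then class B, then
 * best effort, as noted in fec_enet_tx() below.
 */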

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE    (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V  0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */

static struct platform_device_id fec_devtype[] = {
        {
                /* keep it for coldfire */
                .name = DRIVER_NAME,
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
                .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
        }, {
                .name = "imx27-fec",
                .driver_data = FEC_QUIRK_MIB_CLEAR,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                                FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                                FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
                                FEC_QUIRK_HAS_RACC,
        }, {
                .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6sx-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                                FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
                                FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                                FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
        }, {
                .name = "imx6ul-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                                FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
                                FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
                                FEC_QUIRK_HAS_COALESCE,
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
        IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
        MVF600_FEC,
        IMX6SX_FEC,
        IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
        { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
        { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
        { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC    0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC    0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC    0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC    (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC    0xffc0406b
#else
#define FEC_FLASHMAC    0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE         (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE         64

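/* With 2048 byte buffers and a worst-case alignment of 64 bytes this works
 * out to round_down(2048 - 64, 64) = round_down(1984, 64) = 1984 bytes of
 * usable receive buffer per descriptor.
 */
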
/* FEC receive acceleration */
#define FEC_RACC_IPDIS          (1 << 1)
#define FEC_RACC_PRODIS         (1 << 2)
#define FEC_RACC_SHIFT16        BIT(7)
#define FEC_RACC_OPTIONS        (FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE        BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE  0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST             (1 << 30)
#define FEC_MMFR_OP_READ        (2 << 28)
#define FEC_MMFR_OP_WRITE       (1 << 28)
#define FEC_MMFR_PA(v)          ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)          ((v & 0x1f) << 18)
#define FEC_MMFR_TA             (2 << 16)
#define FEC_MMFR_DATA(v)        (v & 0xffff)
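
/* A management frame is composed by OR-ing the fields above into the MII
 * data register.  As an illustrative sketch only (assuming the FEC_MII_DATA
 * register offset from fec.h), starting a read of register 1 (BMSR) on the
 * PHY at address 2 would look roughly like:
 *
 *	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(2) |
 *	       FEC_MMFR_RA(1) | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
 *
 * The real MDIO accessors elsewhere in this file also wait for the MII
 * interrupt event before reading the FEC_MMFR_DATA field back.
 */
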
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN         (1 << 2)
#define FEC_ECR_SLEEP           (1 << 3)

#define FEC_MII_TIMEOUT         30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG  0x1
#define FEC_PAUSE_FLAG_ENABLE   0x2
#define FEC_WOL_HAS_MAGIC_PACKET        (0x1 << 0)
#define FEC_WOL_FLAG_ENABLE             (0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON           (0x1 << 2)

#define COPYBREAK_DEFAULT       256

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS        100
#define FEC_MAX_SKB_DESCS       (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_dma) && \
        (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

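/* IS_TSO_HEADER() tells the reclaim and init paths whether a descriptor's
 * buffer lives in the preallocated TSO header area (txq->tso_hdrs_dma, one
 * TSO_HEADER_SIZE slot per ring entry).  Such buffers are owned by the
 * driver for the lifetime of the ring and must not be passed to
 * dma_unmap_single() when the descriptor completes.
 */
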
static int mii_cnt;

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
{
        return (bdp >= bd->last) ? bd->base
                        : (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
                                             struct bufdesc_prop *bd)
{
        return (bdp <= bd->base) ? bd->last
                        : (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
                                 struct bufdesc_prop *bd)
{
        return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
        int entries;

        entries = (((const char *)txq->dirty_tx -
                        (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

        return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
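
/* For example, with a 512 entry ring, dirty_tx == bd.cur means the ring is
 * empty and 511 descriptors are free: (0 >> dsize_log2) - 1 = -1, which
 * wraps to -1 + 512 = 511.  One entry is deliberately sacrificed so that a
 * completely full ring and an empty ring remain distinguishable.
 */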

static void swap_buffer(void *bufaddr, int len)
{
        int i;
        unsigned int *buf = bufaddr;

        for (i = 0; i < len; i += 4, buf++)
                swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
        int i;
        unsigned int *src = src_buf;
        unsigned int *dst = dst_buf;

        for (i = 0; i < len; i += 4, src++, dst++)
                *dst = swab32p(src);
}
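
/* On FEC_QUIRK_SWAP_FRAME hardware (the i.MX28 entry in fec_devtype[]) the
 * DMA engine presents frame data with the wrong byte order, so every 32-bit
 * word is swapped in place (e.g. 0x11223344 becomes 0x44332211) before
 * transmit and after receive.  swap_buffer2() is the out-of-place variant
 * used by the receive copybreak path.
 */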

static void fec_dump(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *bdp;
        struct fec_enet_priv_tx_q *txq;
        int index = 0;

        netdev_info(ndev, "TX ring dump\n");
        pr_info("Nr     SC     addr       len  SKB\n");

        txq = fep->tx_queue[0];
        bdp = txq->bd.base;

        do {
                pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
                        index,
                        bdp == txq->bd.cur ? 'S' : ' ',
                        bdp == txq->dirty_tx ? 'H' : ' ',
                        fec16_to_cpu(bdp->cbd_sc),
                        fec32_to_cpu(bdp->cbd_bufaddr),
                        fec16_to_cpu(bdp->cbd_datlen),
                        txq->tx_skbuff[index]);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                index++;
        } while (bdp != txq->bd.base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
        /* Only run for packets requiring a checksum. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (unlikely(skb_cow_head(skb, 0)))
                return -1;

        if (is_ipv4_pkt(skb))
                ip_hdr(skb)->check = 0;
        *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

        return 0;
}

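/* When BD_ENET_TX_IINS/BD_ENET_TX_PINS are set in the extended descriptor
 * the hardware computes and inserts the IP header and protocol checksums
 * itself, but it expects the checksum fields to start out as zero -- hence
 * the explicit clearing above for CHECKSUM_PARTIAL packets.
 */
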
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                             struct sk_buff *skb,
                             struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *bdp = txq->bd.cur;
        struct bufdesc_ex *ebdp;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int frag, frag_len;
        unsigned short status;
        unsigned int estatus = 0;
        skb_frag_t *this_frag;
        unsigned int index;
        void *bufaddr;
        dma_addr_t addr;
        int i;

        for (frag = 0; frag < nr_frags; frag++) {
                this_frag = &skb_shinfo(skb)->frags[frag];
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                ebdp = (struct bufdesc_ex *)bdp;

                status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
                frag_len = skb_shinfo(skb)->frags[frag].size;

                /* Handle the last BD specially */
                if (frag == nr_frags - 1) {
                        status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
                        if (fep->bufdesc_ex) {
                                estatus |= BD_ENET_TX_INT;
                                if (unlikely(skb_shinfo(skb)->tx_flags &
                                        SKBTX_HW_TSTAMP && fep->hwts_tx_en))
                                        estatus |= BD_ENET_TX_TS;
                        }
                }

                if (fep->bufdesc_ex) {
                        if (fep->quirks & FEC_QUIRK_HAS_AVB)
                                estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                        ebdp->cbd_bdu = 0;
                        ebdp->cbd_esc = cpu_to_fec32(estatus);
                }

                bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

                index = fec_enet_get_bd_index(bdp, &txq->bd);
                if (((unsigned long) bufaddr) & fep->tx_align ||
                        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                        memcpy(txq->tx_bounce[index], bufaddr, frag_len);
                        bufaddr = txq->tx_bounce[index];

                        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                                swap_buffer(bufaddr, frag_len);
                }

                addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(&fep->pdev->dev, addr)) {
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        goto dma_mapping_error;
                }

                bdp->cbd_bufaddr = cpu_to_fec32(addr);
                bdp->cbd_datlen = cpu_to_fec16(frag_len);
                /* Make sure the updates to rest of the descriptor are
                 * performed before transferring ownership.
                 */
                wmb();
                bdp->cbd_sc = cpu_to_fec16(status);
        }

        return bdp;
dma_mapping_error:
        bdp = txq->bd.cur;
        for (i = 0; i < frag; i++) {
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
                                 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
        }
        return ERR_PTR(-ENOMEM);
}

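/* If a buffer is not aligned to the per-SoC DMA alignment mask kept in
 * fep->tx_align, or the frame needs the i.MX28 byte swap, the data is first
 * copied into the per-descriptor txq->tx_bounce[] area above so the DMA
 * engine always sees an aligned, correctly ordered buffer.
 */
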
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                                   struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        struct bufdesc *bdp, *last_bdp;
        void *bufaddr;
        dma_addr_t addr;
        unsigned short status;
        unsigned short buflen;
        unsigned int estatus = 0;
        unsigned int index;
        int entries_free;

        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "NOT enough BD for SG!\n");
                return NETDEV_TX_OK;
        }

        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Fill in a Tx ring entry */
        bdp = txq->bd.cur;
        last_bdp = bdp;
        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;

        /* Set buffer length and buffer pointer */
        bufaddr = skb->data;
        buflen = skb_headlen(skb);

        index = fec_enet_get_bd_index(bdp, &txq->bd);
        if (((unsigned long) bufaddr) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], skb->data, buflen);
                bufaddr = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(bufaddr, buflen);
        }

        /* Push the data cache so the CPM does not get stale memory data. */
        addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_OK;
        }

        if (nr_frags) {
                last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
                if (IS_ERR(last_bdp)) {
                        dma_unmap_single(&fep->pdev->dev, addr,
                                         buflen, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        } else {
                status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
                if (fep->bufdesc_ex) {
                        estatus = BD_ENET_TX_INT;
                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                SKBTX_HW_TSTAMP && fep->hwts_tx_en))
                                estatus |= BD_ENET_TX_TS;
                }
        }
        bdp->cbd_bufaddr = cpu_to_fec32(addr);
        bdp->cbd_datlen = cpu_to_fec16(buflen);

        if (fep->bufdesc_ex) {

                struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
                        fep->hwts_tx_en))
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;

        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
         */
        wmb();

        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
        bdp->cbd_sc = cpu_to_fec16(status);

        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

        skb_tx_timestamp(skb);

        /* Make sure the update to bdp and tx_skbuff are performed before
         * txq->bd.cur.
         */
        wmb();
        txq->bd.cur = bdp;

        /* Trigger transmission start */
        writel(0, txq->bd.reg_desc_active);

        return 0;
}

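/* The two wmb() calls above form the producer side of the ring protocol:
 * the first makes the fully written descriptor visible before the READY bit
 * hands it to the hardware, and the second publishes the bdp/tx_skbuff
 * updates before txq->bd.cur moves.  The consumer side in
 * fec_enet_tx_queue() pairs with them via an rmb() after reading bd.cur.
 */
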
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                          struct net_device *ndev,
                          struct bufdesc *bdp, int index, char *data,
                          int size, bool last_tcp, bool is_last)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
        unsigned short status;
        unsigned int estatus = 0;
        dma_addr_t addr;

        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;

        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

        if (((unsigned long) data) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], data, size);
                data = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, size);
        }

        addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_BUSY;
        }

        bdp->cbd_datlen = cpu_to_fec16(size);
        bdp->cbd_bufaddr = cpu_to_fec32(addr);

        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        /* Handle the last BD specially */
        if (last_tcp)
                status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
        if (is_last) {
                status |= BD_ENET_TX_INTR;
                if (fep->bufdesc_ex)
                        ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
        }

        bdp->cbd_sc = cpu_to_fec16(status);

        return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                         struct sk_buff *skb, struct net_device *ndev,
                         struct bufdesc *bdp, int index)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
        void *bufaddr;
        unsigned long dmabuf;
        unsigned short status;
        unsigned int estatus = 0;

        status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

        bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
        dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
        if (((unsigned long)bufaddr) & fep->tx_align ||
                fep->quirks & FEC_QUIRK_SWAP_FRAME) {
                memcpy(txq->tx_bounce[index], skb->data, hdr_len);
                bufaddr = txq->tx_bounce[index];

                if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(bufaddr, hdr_len);

                dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
                                        hdr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
                        dev_kfree_skb_any(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        return NETDEV_TX_BUSY;
                }
        }

        bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
        bdp->cbd_datlen = cpu_to_fec16(hdr_len);

        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }

        bdp->cbd_sc = cpu_to_fec16(status);

        return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
                                   struct sk_buff *skb,
                                   struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len, data_left;
        struct bufdesc *bdp = txq->bd.cur;
        struct tso_t tso;
        unsigned int index = 0;
        int ret;

        if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "NOT enough BD for TSO!\n");
                return NETDEV_TX_OK;
        }

        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Initialize the TSO handler, and prepare the first payload */
        tso_start(skb, &tso);

        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                char *hdr;

                index = fec_enet_get_bd_index(bdp, &txq->bd);
                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                total_len -= data_left;

                /* prepare packet headers: MAC + IP + TCP */
                hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
                if (ret)
                        goto err_release;

                while (data_left > 0) {
                        int size;

                        size = min_t(int, tso.size, data_left);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                        index = fec_enet_get_bd_index(bdp, &txq->bd);
                        ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
                                                        bdp, index,
                                                        tso.data, size,
                                                        size == data_left,
                                                        total_len == 0);
                        if (ret)
                                goto err_release;

                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }

                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
        }

        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;

        skb_tx_timestamp(skb);
        txq->bd.cur = bdp;

        /* Trigger transmission start */
        if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active) ||
            !readl(txq->bd.reg_desc_active))
                writel(0, txq->bd.reg_desc_active);

        return 0;

err_release:
        /* TODO: Release all used data descriptors for TSO */
        return ret;
}

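/* The chain of readl()s on reg_desc_active above is the ERR007885 erratum
 * workaround for the i.MX6SX/i.MX6UL class parts (see fec_devtype[]): a
 * TDAR trigger written while the hardware is busy clearing the bit can be
 * lost.  The register is therefore sampled up to four times; if every read
 * returns non-zero the DMA is already active and no trigger is needed,
 * otherwise the trigger write is issued.
 */
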
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int entries_free;
        unsigned short queue;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int ret;

        queue = skb_get_queue_mapping(skb);
        txq = fep->tx_queue[queue];
        nq = netdev_get_tx_queue(ndev, queue);

        if (skb_is_gso(skb))
                ret = fec_enet_txq_submit_tso(txq, skb, ndev);
        else
                ret = fec_enet_txq_submit_skb(txq, skb, ndev);
        if (ret)
                return ret;

        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free <= txq->tx_stop_threshold)
                netif_tx_stop_queue(nq);

        return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
        struct bufdesc *bdp;
        unsigned int i;
        unsigned int q;

        for (q = 0; q < fep->num_rx_queues; q++) {
                /* Initialize the receive buffer descriptors. */
                rxq = fep->rx_queue[q];
                bdp = rxq->bd.base;

                for (i = 0; i < rxq->bd.ring_size; i++) {

                        /* Initialize the BD for every fragment in the page. */
                        if (bdp->cbd_bufaddr)
                                bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
                        else
                                bdp->cbd_sc = cpu_to_fec16(0);
                        bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
                }

                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
                bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

                rxq->bd.cur = rxq->bd.base;
        }

        for (q = 0; q < fep->num_tx_queues; q++) {
                /* ...and the same for transmit */
                txq = fep->tx_queue[q];
                bdp = txq->bd.base;
                txq->bd.cur = bdp;

                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
                        if (bdp->cbd_bufaddr &&
                            !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                                dma_unmap_single(&fep->pdev->dev,
                                                 fec32_to_cpu(bdp->cbd_bufaddr),
                                                 fec16_to_cpu(bdp->cbd_datlen),
                                                 DMA_TO_DEVICE);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
                        }
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                }

                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
                bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
                txq->dirty_tx = bdp;
        }
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        int i;

        for (i = 0; i < fep->num_rx_queues; i++)
                writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
        int i;

        for (i = 0; i < fep->num_rx_queues; i++) {
                rxq = fep->rx_queue[i];
                writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
                writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

                /* enable DMA1/2 */
                if (i)
                        writel(RCMR_MATCHEN | RCMR_CMP(i),
                               fep->hwp + FEC_RCMR(i));
        }

        for (i = 0; i < fep->num_tx_queues; i++) {
                txq = fep->tx_queue[i];
                writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

                /* enable DMA1/2 */
                if (i)
                        writel(DMA_CLASS_EN | IDLE_SLOPE(i),
                               fep->hwp + FEC_DMA_CFG(i));
        }
}

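/* Queues 1 and 2 only exist on FEC_QUIRK_HAS_AVB hardware.  For those rings
 * fec_enet_enable_ring() above programs the receive classification match
 * (FEC_RCMR) and the DMA class enable/idle-slope (FEC_DMA_CFG) registers
 * per queue, while queue 0 keeps the legacy single-ring behaviour.
 */
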
static void fec_enet_reset_skb(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_tx_q *txq;
        int i, j;

        for (i = 0; i < fep->num_tx_queues; i++) {
                txq = fep->tx_queue[i];

                for (j = 0; j < txq->bd.ring_size; j++) {
                        if (txq->tx_skbuff[j]) {
                                dev_kfree_skb_any(txq->tx_skbuff[j]);
                                txq->tx_skbuff[j] = NULL;
                        }
                }
        }
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 val;
        u32 temp_mac[2];
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */

        /* Whack a reset.  We should wait for this.
         * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
         * disable the MAC instead of resetting it.
         */
        if (fep->quirks & FEC_QUIRK_HAS_AVB) {
                writel(0, fep->hwp + FEC_ECNTRL);
        } else {
                writel(1, fep->hwp + FEC_ECNTRL);
                udelay(10);
        }

        /*
         * The enet-mac reset also clears the MAC address registers,
         * so we need to reprogram them.
         */
        memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
        writel((__force u32)cpu_to_be32(temp_mac[0]),
               fep->hwp + FEC_ADDR_LOW);
        writel((__force u32)cpu_to_be32(temp_mac[1]),
               fep->hwp + FEC_ADDR_HIGH);

        /* Clear any outstanding interrupt. */
        writel(0xffffffff, fep->hwp + FEC_IEVENT);

        fec_enet_bd_init(ndev);

        fec_enet_enable_ring(ndev);

        /* Reset tx SKB buffers. */
        fec_enet_reset_skb(ndev);

        /* Enable MII mode */
        if (fep->full_duplex == DUPLEX_FULL) {
                /* FD enable */
                writel(0x04, fep->hwp + FEC_X_CNTRL);
        } else {
                /* No Rcv on Xmit */
                rcntl |= 0x02;
                writel(0x0, fep->hwp + FEC_X_CNTRL);
        }

        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
        if (fep->quirks & FEC_QUIRK_HAS_RACC) {
                val = readl(fep->hwp + FEC_RACC);
                /* align IP header */
                val |= FEC_RACC_SHIFT16;
                if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
                        /* set RX checksum */
                        val |= FEC_RACC_OPTIONS;
                else
                        val &= ~FEC_RACC_OPTIONS;
                writel(val, fep->hwp + FEC_RACC);
                writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
        }
#endif

        /*
         * The phy interface and speed need to get configured
         * differently on enet-mac.
         */
        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                /* Enable flow control and length check */
                rcntl |= 0x40000000 | 0x00000020;

                /* RGMII, RMII or MII */
                if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
                    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
                        rcntl |= (1 << 6);
                else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                        rcntl |= (1 << 8);
                else
                        rcntl &= ~(1 << 8);

                /* 1G, 100M or 10M */
                if (ndev->phydev) {
                        if (ndev->phydev->speed == SPEED_1000)
                                ecntl |= (1 << 5);
                        else if (ndev->phydev->speed == SPEED_100)
                                rcntl &= ~(1 << 9);
                        else
                                rcntl |= (1 << 9);
                }
        } else {
#ifdef FEC_MIIGSK_ENR
                if (fep->quirks & FEC_QUIRK_USE_GASKET) {
                        u32 cfgr;
                        /* disable the gasket and wait */
                        writel(0, fep->hwp + FEC_MIIGSK_ENR);
                        while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
                                udelay(1);

                        /*
                         * configure the gasket:
                         *   RMII, 50 MHz, no loopback, no echo
                         *   MII, 25 MHz, no loopback, no echo
                         */
                        cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
                                ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
                        if (ndev->phydev && ndev->phydev->speed == SPEED_10)
                                cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
                        writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

                        /* re-enable the gasket */
                        writel(2, fep->hwp + FEC_MIIGSK_ENR);
                }
#endif
        }

#if !defined(CONFIG_M5272)
        /* enable pause frame */
        if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
            ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
             ndev->phydev && ndev->phydev->pause)) {
                rcntl |= FEC_ENET_FCE;

                /* set FIFO threshold parameter to reduce overrun */
                writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
                writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
                writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
                writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

                /* OPD */
                writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
        } else {
                rcntl &= ~FEC_ENET_FCE;
        }
#endif /* !defined(CONFIG_M5272) */

        writel(rcntl, fep->hwp + FEC_R_CNTRL);

        /* Setup multicast filter. */
        set_multicast_list(ndev);
#ifndef CONFIG_M5272
        writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
        writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                /* enable ENET endian swap */
                ecntl |= (1 << 8);
                /* enable ENET store and forward mode */
                writel(1 << 8, fep->hwp + FEC_X_WMRK);
        }

        if (fep->bufdesc_ex)
                ecntl |= (1 << 4);

#ifndef CONFIG_M5272
        /* Enable the MIB statistic event counters by clearing
         * FEC_MIB_CTRLSTAT_DISABLE.
         */
        writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

        /* And last, enable the transmit and receive processing */
        writel(ecntl, fep->hwp + FEC_ECNTRL);
        fec_enet_active_rxring(ndev);

        if (fep->bufdesc_ex)
                fec_ptp_start_cyclecounter(ndev);

        /* Enable interrupts we wish to service */
        if (fep->link)
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        else
                writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

        /* Init the interrupt coalescing */
        fec_enet_itr_coal_init(ndev);
}

static void
fec_stop(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
        u32 val;

        /* We cannot expect a graceful transmit stop without link! */
        if (fep->link) {
                writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
                udelay(10);
                if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
                        netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }

        /* Whack a reset.  We should wait for this.
         * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
         * disable the MAC instead of resetting it.
         */
        if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB) {
                        writel(0, fep->hwp + FEC_ECNTRL);
                } else {
                        writel(1, fep->hwp + FEC_ECNTRL);
                        udelay(10);
                }
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        } else {
                writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
                val = readl(fep->hwp + FEC_ECNTRL);
                val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
                writel(val, fep->hwp + FEC_ECNTRL);

                if (pdata && pdata->sleep_mode_enable)
                        pdata->sleep_mode_enable(true);
        }
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

        /* We have to keep ENET enabled to have MII interrupt stay working */
        if (fep->quirks & FEC_QUIRK_ENET_MAC &&
                !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
                writel(2, fep->hwp + FEC_ECNTRL);
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
}

static void
fec_timeout(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        fec_dump(ndev);

        ndev->stats.tx_errors++;

        schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
        struct fec_enet_private *fep =
                container_of(work, struct fec_enet_private, tx_timeout_work);
        struct net_device *ndev = fep->netdev;

        rtnl_lock();
        if (netif_device_present(ndev) || netif_running(ndev)) {
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
                netif_wake_queue(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
        rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
        struct skb_shared_hwtstamps *hwtstamps)
{
        unsigned long flags;
        u64 ns;

        spin_lock_irqsave(&fep->tmreg_lock, flags);
        ns = timecounter_cyc2time(&fep->tc, ts);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);

        memset(hwtstamps, 0, sizeof(*hwtstamps));
        hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
        struct  fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int     index = 0;
        int     entries_free;

        fep = netdev_priv(ndev);

        queue_id = FEC_ENET_GET_QUEUE(queue_id);

        txq = fep->tx_queue[queue_id];
        nq = netdev_get_tx_queue(ndev, queue_id);
        bdp = txq->dirty_tx;

        /* get next bdp of dirty_tx */
        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

        while (bdp != READ_ONCE(txq->bd.cur)) {
                /* Order the load of bd.cur and cbd_sc */
                rmb();
                status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
                if (status & BD_ENET_TX_READY)
                        break;

                index = fec_enet_get_bd_index(bdp, &txq->bd);

                skb = txq->tx_skbuff[index];
                txq->tx_skbuff[index] = NULL;
                if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                        dma_unmap_single(&fep->pdev->dev,
                                         fec32_to_cpu(bdp->cbd_bufaddr),
                                         fec16_to_cpu(bdp->cbd_datlen),
                                         DMA_TO_DEVICE);
                bdp->cbd_bufaddr = cpu_to_fec32(0);
                if (!skb)
                        goto skb_done;

                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
                                   BD_ENET_TX_CSL)) {
                        ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
                                ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
                                ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
                                ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
                                ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
                }

                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                        fep->bufdesc_ex) {
                        struct skb_shared_hwtstamps shhwtstamps;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }

                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;

                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
skb_done:
                /* Make sure the update to bdp and tx_skbuff are performed
                 * before dirty_tx
                 */
                wmb();
                txq->dirty_tx = bdp;

                /* Update pointer to next buffer descriptor to be transmitted */
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

                /* Since we have freed up a buffer, the ring is no longer full
                 */
                if (netif_queue_stopped(ndev)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
                }
        }

        /* ERR006358: Keep the transmitter going */
        if (bdp != txq->bd.cur &&
            readl(txq->bd.reg_desc_active) == 0)
                writel(0, txq->bd.reg_desc_active);
}

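/* The ERR006358 rewrite of reg_desc_active at the end of
 * fec_enet_tx_queue() covers the i.MX6Q class hardware (see fec_devtype[]),
 * where the transmit DMA can go idle even though READY descriptors remain
 * in the ring; rearming TDAR whenever work is pending but the active
 * register reads zero keeps the transmitter going.
 */
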
static void
fec_enet_tx(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        u16 queue_id;

        /* First process class A queue, then Class B and Best Effort queue */
        for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
                clear_bit(queue_id, &fep->work_tx);
                fec_enet_tx_queue(ndev, queue_id);
        }
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
        struct  fec_enet_private *fep = netdev_priv(ndev);
        int off;

        off = ((unsigned long)skb->data) & fep->rx_align;
        if (off)
                skb_reserve(skb, fep->rx_align + 1 - off);

        bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
        if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
                if (net_ratelimit())
                        netdev_err(ndev, "Rx DMA memory map failed\n");
                return -ENOMEM;
        }

        return 0;
}

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
                               struct bufdesc *bdp, u32 length, bool swap)
{
        struct  fec_enet_private *fep = netdev_priv(ndev);
        struct sk_buff *new_skb;

        if (length > fep->rx_copybreak)
                return false;

        new_skb = netdev_alloc_skb(ndev, length);
        if (!new_skb)
                return false;

        dma_sync_single_for_cpu(&fep->pdev->dev,
                                fec32_to_cpu(bdp->cbd_bufaddr),
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
        if (!swap)
                memcpy(new_skb->data, (*skb)->data, length);
        else
                swap_buffer2(new_skb->data, (*skb)->data, length);
        *skb = new_skb;

        return true;
}

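/* Copybreak trades one memcpy for a buffer recycle: packets no longer than
 * rx_copybreak (COPYBREAK_DEFAULT is 256 bytes) are duplicated into a
 * freshly allocated right-sized skb after dma_sync_single_for_cpu(), so the
 * original large receive buffer stays mapped and can be handed straight
 * back to the hardware.
 */
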
/* During a receive, rxq->bd.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_enet_priv_rx_q *rxq;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb_new = NULL;
        struct  sk_buff *skb;
        ushort  pkt_len;
        __u8 *data;
        int     pkt_received = 0;
        struct  bufdesc_ex *ebdp = NULL;
        bool    vlan_packet_rcvd = false;
        u16     vlan_tag;
        int     index = 0;
        bool    is_copybreak;
        bool    need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
        flush_cache_all();
#endif
        queue_id = FEC_ENET_GET_QUEUE(queue_id);
        rxq = fep->rx_queue[queue_id];

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = rxq->bd.cur;

        while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

                if (pkt_received >= budget)
                        break;
                pkt_received++;

                writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

                /* Check for errors. */
                status ^= BD_ENET_RX_LAST;
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                           BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
                           BD_ENET_RX_CL)) {
                        ndev->stats.rx_errors++;
                        if (status & BD_ENET_RX_OV) {
                                /* FIFO overrun */
                                ndev->stats.rx_fifo_errors++;
                                goto rx_processing_done;
                        }
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
                                                | BD_ENET_RX_LAST)) {
                                /* Frame too long or too short. */
                                ndev->stats.rx_length_errors++;
                                if (status & BD_ENET_RX_LAST)
                                        netdev_err(ndev, "rcv is not +last\n");
                        }
                        if (status & BD_ENET_RX_CR)     /* CRC Error */
                                ndev->stats.rx_crc_errors++;
                        /* Report late collisions as a frame error. */
                        if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
                pkt_len = fec16_to_cpu(bdp->cbd_datlen);
                ndev->stats.rx_bytes += pkt_len;

                index = fec_enet_get_bd_index(bdp, &rxq->bd);
                skb = rxq->rx_skbuff[index];

                /* The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
                                                  need_swap);
                if (!is_copybreak) {
                        skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
                        if (unlikely(!skb_new)) {
                                ndev->stats.rx_dropped++;
                                goto rx_processing_done;
                        }
                        dma_unmap_single(&fep->pdev->dev,
                                         fec32_to_cpu(bdp->cbd_bufaddr),
                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
                                         DMA_FROM_DEVICE);
                }

                prefetch(skb->data - NET_IP_ALIGN);
                skb_put(skb, pkt_len - 4);
                data = skb->data;

                if (!is_copybreak && need_swap)
                        swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
                if (fep->quirks & FEC_QUIRK_HAS_RACC)
                        data = skb_pull_inline(skb, 2);
#endif

                /* Extract the enhanced buffer descriptor */
                ebdp = NULL;
                if (fep->bufdesc_ex)
                        ebdp = (struct bufdesc_ex *)bdp;

                /* If this is a VLAN packet remove the VLAN Tag */
                vlan_packet_rcvd = false;
                if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    fep->bufdesc_ex &&
                    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
                        /* Push and remove the vlan tag */
                        struct vlan_hdr *vlan_header =
                                        (struct vlan_hdr *) (data + ETH_HLEN);
                        vlan_tag = ntohs(vlan_header->h_vlan_TCI);

                        vlan_packet_rcvd = true;

                        memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
                        skb_pull(skb, VLAN_HLEN);
                }

                skb->protocol = eth_type_trans(skb, ndev);

                /* Get receive timestamp from the skb */
                if (fep->hwts_rx_en && fep->bufdesc_ex)
                        fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
                                          skb_hwtstamps(skb));

                if (fep->bufdesc_ex &&
                    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
                        if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
                                /* don't check it */
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
                                skb_checksum_none_assert(skb);
1489                        }
1490                }
1491
1492                /* Handle received VLAN packets */
1493                if (vlan_packet_rcvd)
1494                        __vlan_hwaccel_put_tag(skb,
1495                                               htons(ETH_P_8021Q),
1496                                               vlan_tag);
1497
1498                napi_gro_receive(&fep->napi, skb);
1499
1500                if (is_copybreak) {
1501                        dma_sync_single_for_device(&fep->pdev->dev,
1502                                                   fec32_to_cpu(bdp->cbd_bufaddr),
1503                                                   FEC_ENET_RX_FRSIZE - fep->rx_align,
1504                                                   DMA_FROM_DEVICE);
1505                } else {
1506                        rxq->rx_skbuff[index] = skb_new;
1507                        fec_enet_new_rxbdp(ndev, bdp, skb_new);
1508                }
1509
1510rx_processing_done:
1511                /* Clear the status flags for this buffer */
1512                status &= ~BD_ENET_RX_STATS;
1513
1514                /* Mark the buffer empty */
1515                status |= BD_ENET_RX_EMPTY;
1516
1517                if (fep->bufdesc_ex) {
1518                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1519
1520                        ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1521                        ebdp->cbd_prot = 0;
1522                        ebdp->cbd_bdu = 0;
1523                }
1524                /* Make sure the updates to rest of the descriptor are
1525                 * performed before transferring ownership.
1526                 */
1527                wmb();
1528                bdp->cbd_sc = cpu_to_fec16(status);
1529
1530                /* Update BD pointer to next entry */
1531                bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1532
1533                /* Doing this here will keep the FEC running while we process
1534                 * incoming frames.  On a heavily loaded network, we should be
1535                 * able to keep up at the expense of system resources.
1536                 */
1537                writel(0, rxq->bd.reg_desc_active);
1538        }
1539        rxq->bd.cur = bdp;
1540        return pkt_received;
1541}
1542
1543static int
1544fec_enet_rx(struct net_device *ndev, int budget)
1545{
1546        int     pkt_received = 0;
1547        u16     queue_id;
1548        struct fec_enet_private *fep = netdev_priv(ndev);
1549
1550        for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1551                int ret;
1552
1553                ret = fec_enet_rx_queue(ndev,
1554                                        budget - pkt_received, queue_id);
1555
1556                if (ret < budget - pkt_received)
1557                        clear_bit(queue_id, &fep->work_rx);
1558
1559                pkt_received += ret;
1560        }
1561        return pkt_received;
1562}
1563
1564static bool
1565fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
1566{
1567        if (int_events == 0)
1568                return false;
1569
1570        if (int_events & FEC_ENET_RXF_0)
1571                fep->work_rx |= (1 << 2);
1572        if (int_events & FEC_ENET_RXF_1)
1573                fep->work_rx |= (1 << 0);
1574        if (int_events & FEC_ENET_RXF_2)
1575                fep->work_rx |= (1 << 1);
1576
1577        if (int_events & FEC_ENET_TXF_0)
1578                fep->work_tx |= (1 << 2);
1579        if (int_events & FEC_ENET_TXF_1)
1580                fep->work_tx |= (1 << 0);
1581        if (int_events & FEC_ENET_TXF_2)
1582                fep->work_tx |= (1 << 1);
1583
1584        return true;
1585}
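
    /* Note the deliberate rotation above: RXF_0/TXF_0 land in bit 2 and
     * RXF_1/TXF_1 in bit 0.  This mirrors FEC_ENET_GET_QUQUE(), which turns
     * the stored bit number back into the hardware queue index when the
     * NAPI poll routine walks work_rx/work_tx.
     */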
1586
1587static irqreturn_t
1588fec_enet_interrupt(int irq, void *dev_id)
1589{
1590        struct net_device *ndev = dev_id;
1591        struct fec_enet_private *fep = netdev_priv(ndev);
1592        uint int_events;
1593        irqreturn_t ret = IRQ_NONE;
1594
1595        int_events = readl(fep->hwp + FEC_IEVENT);
1596        writel(int_events, fep->hwp + FEC_IEVENT);
1597        fec_enet_collect_events(fep, int_events);
1598
1599        if ((fep->work_tx || fep->work_rx) && fep->link) {
1600                ret = IRQ_HANDLED;
1601
1602                if (napi_schedule_prep(&fep->napi)) {
1603                        /* Disable the NAPI interrupts */
1604                        writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
1605                        __napi_schedule(&fep->napi);
1606                }
1607        }
1608
1609        if (int_events & FEC_ENET_MII) {
1610                ret = IRQ_HANDLED;
1611                complete(&fep->mdio_done);
1612        }
1613        return ret;
1614}
1615
1616static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1617{
1618        struct net_device *ndev = napi->dev;
1619        struct fec_enet_private *fep = netdev_priv(ndev);
1620        int pkts;
1621
1622        pkts = fec_enet_rx(ndev, budget);
1623
1624        fec_enet_tx(ndev);
1625
1626        if (pkts < budget) {
1627                napi_complete_done(napi, pkts);
1628                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1629        }
1630        return pkts;
1631}
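
    /* Per the NAPI contract, returning a count smaller than the budget
     * signals that polling is done, so napi_complete_done() runs and the
     * full interrupt mask (FEC_DEFAULT_IMASK) is restored; returning the
     * whole budget keeps the poll routine scheduled with the receive and
     * transmit interrupts still masked.
     */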
1632
1633/* ------------------------------------------------------------------------- */
1634static void fec_get_mac(struct net_device *ndev)
1635{
1636        struct fec_enet_private *fep = netdev_priv(ndev);
1637        struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1638        unsigned char *iap, tmpaddr[ETH_ALEN];
1639
1640        /*
1641         * try to get mac address in following order:
1642         *
1643         * 1) module parameter via kernel command line in form
1644         *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1645         */
1646        iap = macaddr;
1647
1648        /*
1649         * 2) from device tree data
1650         */
1651        if (!is_valid_ether_addr(iap)) {
1652                struct device_node *np = fep->pdev->dev.of_node;
1653                if (np) {
1654                        const char *mac = of_get_mac_address(np);
1655                        if (mac)
1656                                iap = (unsigned char *) mac;
1657                }
1658        }
1659
1660        /*
1661         * 3) from flash or fuse (via platform data)
1662         */
1663        if (!is_valid_ether_addr(iap)) {
1664#ifdef CONFIG_M5272
1665                if (FEC_FLASHMAC)
1666                        iap = (unsigned char *)FEC_FLASHMAC;
1667#else
1668                if (pdata)
1669                        iap = (unsigned char *)&pdata->mac;
1670#endif
1671        }
1672
1673        /*
1674         * 4) FEC mac registers set by bootloader
1675         */
1676        if (!is_valid_ether_addr(iap)) {
1677                *((__be32 *) &tmpaddr[0]) =
1678                        cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1679                *((__be16 *) &tmpaddr[4]) =
1680                        cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1681                iap = &tmpaddr[0];
1682        }
1683
1684        /*
1685         * 5) random mac address
1686         */
1687        if (!is_valid_ether_addr(iap)) {
1688                /* Report it and use a random ethernet address instead */
1689                netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
1690                eth_hw_addr_random(ndev);
1691                netdev_info(ndev, "Using random MAC address: %pM\n",
1692                            ndev->dev_addr);
1693                return;
1694        }
1695
1696        memcpy(ndev->dev_addr, iap, ETH_ALEN);
1697
1698        /* Adjust MAC if using macaddr */
1699        if (iap == macaddr)
1700                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
1701}
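
    /* Illustration of the dev_id adjustment above: on a dual-FEC board
     * booted with fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0, fec0
     * (dev_id 0) ends up with ...:30:e0 and fec1 (dev_id 1) with ...:30:e1,
     * giving both ports distinct addresses from one module parameter.
     */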
1702
1703/* ------------------------------------------------------------------------- */
1704
1705/*
1706 * Phy section
1707 */
1708static void fec_enet_adjust_link(struct net_device *ndev)
1709{
1710        struct fec_enet_private *fep = netdev_priv(ndev);
1711        struct phy_device *phy_dev = ndev->phydev;
1712        int status_change = 0;
1713
1714        /* Prevent a state halted on mii error */
1715        if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
1716                phy_dev->state = PHY_RESUMING;
1717                return;
1718        }
1719
1720        /*
1721         * If the netdev is down, or is going down, we're not interested
1722         * in link state events, so just mark our idea of the link as down
1723         * and ignore the event.
1724         */
1725        if (!netif_running(ndev) || !netif_device_present(ndev)) {
1726                fep->link = 0;
1727        } else if (phy_dev->link) {
1728                if (!fep->link) {
1729                        fep->link = phy_dev->link;
1730                        status_change = 1;
1731                }
1732
1733                if (fep->full_duplex != phy_dev->duplex) {
1734                        fep->full_duplex = phy_dev->duplex;
1735                        status_change = 1;
1736                }
1737
1738                if (phy_dev->speed != fep->speed) {
1739                        fep->speed = phy_dev->speed;
1740                        status_change = 1;
1741                }
1742
1743                /* if any of the above changed restart the FEC */
1744                if (status_change) {
1745                        napi_disable(&fep->napi);
1746                        netif_tx_lock_bh(ndev);
1747                        fec_restart(ndev);
1748                        netif_wake_queue(ndev);
1749                        netif_tx_unlock_bh(ndev);
1750                        napi_enable(&fep->napi);
1751                }
1752        } else {
1753                if (fep->link) {
1754                        napi_disable(&fep->napi);
1755                        netif_tx_lock_bh(ndev);
1756                        fec_stop(ndev);
1757                        netif_tx_unlock_bh(ndev);
1758                        napi_enable(&fep->napi);
1759                        fep->link = phy_dev->link;
1760                        status_change = 1;
1761                }
1762        }
1763
1764        if (status_change)
1765                phy_print_status(phy_dev);
1766}
1767
1768static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1769{
1770        struct fec_enet_private *fep = bus->priv;
1771        struct device *dev = &fep->pdev->dev;
1772        unsigned long time_left;
1773        int ret = 0;
1774
1775        ret = pm_runtime_get_sync(dev);
1776        if (ret < 0) {
1777                pm_runtime_put_noidle(dev);
1778                return ret;
1779        }
1778
1779        fep->mii_timeout = 0;
1780        reinit_completion(&fep->mdio_done);
1781
1782        /* start a read op */
1783        writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1784                FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1785                FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1786
1787        /* wait for end of transfer */
1788        time_left = wait_for_completion_timeout(&fep->mdio_done,
1789                        usecs_to_jiffies(FEC_MII_TIMEOUT));
1790        if (time_left == 0) {
1791                fep->mii_timeout = 1;
1792                netdev_err(fep->netdev, "MDIO read timeout\n");
1793                ret = -ETIMEDOUT;
1794                goto out;
1795        }
1796
1797        ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1798
1799out:
1800        pm_runtime_mark_last_busy(dev);
1801        pm_runtime_put_autosuspend(dev);
1802
1803        return ret;
1804}
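
    /* The FEC_MMFR_* fields above assemble a standard IEEE 802.3 clause 22
     * management frame: start (01), opcode (10 = read, 01 = write), a 5-bit
     * PHY address, a 5-bit register address, turnaround, then 16 data bits.
     * The hardware clocks the frame out on MDIO and raises FEC_ENET_MII
     * when done, which completes fep->mdio_done.
     */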
1805
1806static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1807                           u16 value)
1808{
1809        struct fec_enet_private *fep = bus->priv;
1810        struct device *dev = &fep->pdev->dev;
1811        unsigned long time_left;
1812        int ret;
1813
1814        ret = pm_runtime_get_sync(dev);
1815        if (ret < 0) {
1816                pm_runtime_put_noidle(dev);
1817                return ret;
1818        }
1819        ret = 0;
1819
1820        fep->mii_timeout = 0;
1821        reinit_completion(&fep->mdio_done);
1822
1823        /* start a write op */
1824        writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1825                FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1826                FEC_MMFR_TA | FEC_MMFR_DATA(value),
1827                fep->hwp + FEC_MII_DATA);
1828
1829        /* wait for end of transfer */
1830        time_left = wait_for_completion_timeout(&fep->mdio_done,
1831                        usecs_to_jiffies(FEC_MII_TIMEOUT));
1832        if (time_left == 0) {
1833                fep->mii_timeout = 1;
1834                netdev_err(fep->netdev, "MDIO write timeout\n");
1835                ret = -ETIMEDOUT;
1836        }
1837
1838        pm_runtime_mark_last_busy(dev);
1839        pm_runtime_put_autosuspend(dev);
1840
1841        return ret;
1842}
1843
1844static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1845{
1846        struct fec_enet_private *fep = netdev_priv(ndev);
1847        int ret;
1848
1849        if (enable) {
1850                ret = clk_prepare_enable(fep->clk_ahb);
1851                if (ret)
1852                        return ret;
1853
1854                ret = clk_prepare_enable(fep->clk_enet_out);
1855                if (ret)
1856                        goto failed_clk_enet_out;
1857
1858                if (fep->clk_ptp) {
1859                        mutex_lock(&fep->ptp_clk_mutex);
1860                        ret = clk_prepare_enable(fep->clk_ptp);
1861                        if (ret) {
1862                                mutex_unlock(&fep->ptp_clk_mutex);
1863                                goto failed_clk_ptp;
1864                        } else {
1865                                fep->ptp_clk_on = true;
1866                        }
1867                        mutex_unlock(&fep->ptp_clk_mutex);
1868                }
1869
1870                ret = clk_prepare_enable(fep->clk_ref);
1871                if (ret)
1872                        goto failed_clk_ref;
1873
1874                phy_reset_after_clk_enable(ndev->phydev);
1875        } else {
1876                clk_disable_unprepare(fep->clk_ahb);
1877                clk_disable_unprepare(fep->clk_enet_out);
1878                if (fep->clk_ptp) {
1879                        mutex_lock(&fep->ptp_clk_mutex);
1880                        clk_disable_unprepare(fep->clk_ptp);
1881                        fep->ptp_clk_on = false;
1882                        mutex_unlock(&fep->ptp_clk_mutex);
1883                }
1884                clk_disable_unprepare(fep->clk_ref);
1885        }
1886
1887        return 0;
1888
1889failed_clk_ref:
1890        if (fep->clk_ptp) {
1891                mutex_lock(&fep->ptp_clk_mutex);
1892                clk_disable_unprepare(fep->clk_ptp);
1893                fep->ptp_clk_on = false;
1894                mutex_unlock(&fep->ptp_clk_mutex);
1895        }
1896failed_clk_ptp:
1897        clk_disable_unprepare(fep->clk_enet_out);
1898failed_clk_enet_out:
1899        clk_disable_unprepare(fep->clk_ahb);
1897
1898        return ret;
1899}
1900
1901static int fec_enet_mii_probe(struct net_device *ndev)
1902{
1903        struct fec_enet_private *fep = netdev_priv(ndev);
1904        struct phy_device *phy_dev = NULL;
1905        char mdio_bus_id[MII_BUS_ID_SIZE];
1906        char phy_name[MII_BUS_ID_SIZE + 3];
1907        int phy_id;
1908        int dev_id = fep->dev_id;
1909
1910        if (fep->phy_node) {
1911                phy_dev = of_phy_connect(ndev, fep->phy_node,
1912                                         &fec_enet_adjust_link, 0,
1913                                         fep->phy_interface);
1914                if (!phy_dev) {
1915                        netdev_err(ndev, "Unable to connect to phy\n");
1916                        return -ENODEV;
1917                }
1918        } else {
1919                /* check for attached phy */
1920                for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1921                        if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1922                                continue;
1923                        if (dev_id--)
1924                                continue;
1925                        strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
1926                        break;
1927                }
1928
1929                if (phy_id >= PHY_MAX_ADDR) {
1930                        netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1931                        strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1932                        phy_id = 0;
1933                }
1934
1935                snprintf(phy_name, sizeof(phy_name),
1936                         PHY_ID_FMT, mdio_bus_id, phy_id);
1937                phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1938                                      fep->phy_interface);
1939        }
1940
1941        if (IS_ERR(phy_dev)) {
1942                netdev_err(ndev, "could not attach to PHY\n");
1943                return PTR_ERR(phy_dev);
1944        }
1945
1946        /* mask with MAC supported features */
1947        if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
1948                phy_dev->supported &= PHY_GBIT_FEATURES;
1949                phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
1950#if !defined(CONFIG_M5272)
1951                phy_dev->supported |= SUPPORTED_Pause;
1952#endif
1953        } else {
1954                phy_dev->supported &= PHY_BASIC_FEATURES;
1955        }
1956
1957        phy_dev->advertising = phy_dev->supported;
1958
1959        fep->link = 0;
1960        fep->full_duplex = 0;
1961
1962        phy_attached_info(phy_dev);
1963
1964        return 0;
1965}
1966
1967static int fec_enet_mii_init(struct platform_device *pdev)
1968{
1969        static struct mii_bus *fec0_mii_bus;
1970        struct net_device *ndev = platform_get_drvdata(pdev);
1971        struct fec_enet_private *fep = netdev_priv(ndev);
1972        struct device_node *node;
1973        int err = -ENXIO;
1974        u32 mii_speed, holdtime;
1975
1976        /*
1977         * The i.MX28 dual fec interfaces are not equal.
1978         * Here are the differences:
1979         *
1980         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
1981         *  - fec0 acts as the 1588 time master while fec1 is slave
1982         *  - external phys can only be configured by fec0
1983         *
1984         * That is to say, fec1 cannot work independently; it only works
1985         * while fec0 is running. The reason behind this design is that the
1986         * second interface is added primarily for switch mode.
1987         *
1988         * Because of the last point above, both PHYs are attached to the
1989         * fec0 MDIO interface in the board design, and need to be
1990         * configured through the fec0 mii_bus.
1991         */
1992        if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1993                /* fec1 uses fec0 mii_bus */
1994                if (mii_cnt && fec0_mii_bus) {
1995                        fep->mii_bus = fec0_mii_bus;
1996                        mii_cnt++;
1997                        return 0;
1998                }
1999                return -ENOENT;
2000        }
2001
2002        fep->mii_timeout = 0;
2003
2004        /*
2005         * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2006         *
2007         * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2008         * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
2009         * Reference Manual documents this incorrectly; the error is
2010         * corrected in the i.MX6Q documentation.
2011         */
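            /* Worked example (illustrative): with a 66 MHz ipg clock on an
             * ENET-MAC part, mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14,
             * less one for the quirk = 13, so MDC = 66 MHz / ((13 + 1) * 2)
             * ~= 2.36 MHz, just under the 2.5 MHz IEEE 802.3 limit.
             */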
2012        mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2013        if (fep->quirks & FEC_QUIRK_ENET_MAC)
2014                mii_speed--;
2015        if (mii_speed > 63) {
2016                dev_err(&pdev->dev,
2017                        "fec clock (%lu) too fast to get right mii speed\n",
2018                        clk_get_rate(fep->clk_ipg));
2019                err = -EINVAL;
2020                goto err_out;
2021        }
2022
2023        /*
2024         * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2025         * MII_SPEED) register that defines the MDIO output hold time. Earlier
2026         * versions are RAZ there, so just ignore the difference and write the
2027         * register always.
2028         * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2029         * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2030         * output.
2031         * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2032         * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2033         * holdtime cannot result in a value greater than 3.
2034         */
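            /* Worked example (illustrative): at 66 MHz,
             * holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one
             * cycle (~15 ns) of hold; at 264 MHz it is 2, i.e. three cycles
             * (~11.4 ns).  Both satisfy the 10 ns minimum.
             */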
2035        holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2036
2037        fep->phy_speed = mii_speed << 1 | holdtime << 8;
2038
2039        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2040
2041        fep->mii_bus = mdiobus_alloc();
2042        if (fep->mii_bus == NULL) {
2043                err = -ENOMEM;
2044                goto err_out;
2045        }
2046
2047        fep->mii_bus->name = "fec_enet_mii_bus";
2048        fep->mii_bus->read = fec_enet_mdio_read;
2049        fep->mii_bus->write = fec_enet_mdio_write;
2050        snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2051                pdev->name, fep->dev_id + 1);
2052        fep->mii_bus->priv = fep;
2053        fep->mii_bus->parent = &pdev->dev;
2054
2055        node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2056        if (node) {
2057                err = of_mdiobus_register(fep->mii_bus, node);
2058                of_node_put(node);
2059        } else {
2060                err = mdiobus_register(fep->mii_bus);
2061        }
2062
2063        if (err)
2064                goto err_out_free_mdiobus;
2065
2066        mii_cnt++;
2067
2068        /* save fec0 mii_bus */
2069        if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2070                fec0_mii_bus = fep->mii_bus;
2071
2072        return 0;
2073
2074err_out_free_mdiobus:
2075        mdiobus_free(fep->mii_bus);
2076err_out:
2077        return err;
2078}
2079
2080static void fec_enet_mii_remove(struct fec_enet_private *fep)
2081{
2082        if (--mii_cnt == 0) {
2083                mdiobus_unregister(fep->mii_bus);
2084                mdiobus_free(fep->mii_bus);
2085        }
2086}
2087
2088static void fec_enet_get_drvinfo(struct net_device *ndev,
2089                                 struct ethtool_drvinfo *info)
2090{
2091        struct fec_enet_private *fep = netdev_priv(ndev);
2092
2093        strlcpy(info->driver, fep->pdev->dev.driver->name,
2094                sizeof(info->driver));
2095        strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
2096        strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2097}
2098
2099static int fec_enet_get_regs_len(struct net_device *ndev)
2100{
2101        struct fec_enet_private *fep = netdev_priv(ndev);
2102        struct resource *r;
2103        int s = 0;
2104
2105        r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2106        if (r)
2107                s = resource_size(r);
2108
2109        return s;
2110}
2111
2112/* List of registers that can safely be read to dump them with ethtool */
2113#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2114        defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2115        defined(CONFIG_ARM64)
2116static u32 fec_enet_register_offset[] = {
2117        FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2118        FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2119        FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2120        FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2121        FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2122        FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2123        FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2124        FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2125        FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2126        FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2127        FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2128        FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2129        RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2130        RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2131        RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2132        RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2133        RMON_T_P_GTE2048, RMON_T_OCTETS,
2134        IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2135        IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2136        IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2137        RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2138        RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2139        RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2140        RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2141        RMON_R_P_GTE2048, RMON_R_OCTETS,
2142        IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2143        IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2144};
2145#else
2146static u32 fec_enet_register_offset[] = {
2147        FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2148        FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2149        FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2150        FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2151        FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2152        FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2153        FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2154        FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2155        FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2156};
2157#endif
2158
2159static void fec_enet_get_regs(struct net_device *ndev,
2160                              struct ethtool_regs *regs, void *regbuf)
2161{
2162        struct fec_enet_private *fep = netdev_priv(ndev);
2163        u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2164        u32 *buf = (u32 *)regbuf;
2165        u32 i, off;
2166
2167        memset(buf, 0, regs->len);
2168
2169        for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
2170                off = fec_enet_register_offset[i] / 4;
2171                buf[off] = readl(&theregs[off]);
2172        }
2173}
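
    /* The dump buffer is indexed by register offset in words rather than by
     * table position, so offsets absent from fec_enet_register_offset[]
     * simply read back as the zeroes left by the memset() above.
     */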
2174
2175static int fec_enet_get_ts_info(struct net_device *ndev,
2176                                struct ethtool_ts_info *info)
2177{
2178        struct fec_enet_private *fep = netdev_priv(ndev);
2179
2180        if (fep->bufdesc_ex) {
2181
2182                info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2183                                        SOF_TIMESTAMPING_RX_SOFTWARE |
2184                                        SOF_TIMESTAMPING_SOFTWARE |
2185                                        SOF_TIMESTAMPING_TX_HARDWARE |
2186                                        SOF_TIMESTAMPING_RX_HARDWARE |
2187                                        SOF_TIMESTAMPING_RAW_HARDWARE;
2188                if (fep->ptp_clock)
2189                        info->phc_index = ptp_clock_index(fep->ptp_clock);
2190                else
2191                        info->phc_index = -1;
2192
2193                info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2194                                 (1 << HWTSTAMP_TX_ON);
2195
2196                info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2197                                   (1 << HWTSTAMP_FILTER_ALL);
2198                return 0;
2199        } else {
2200                return ethtool_op_get_ts_info(ndev, info);
2201        }
2202}
2203
2204#if !defined(CONFIG_M5272)
2205
2206static void fec_enet_get_pauseparam(struct net_device *ndev,
2207                                    struct ethtool_pauseparam *pause)
2208{
2209        struct fec_enet_private *fep = netdev_priv(ndev);
2210
2211        pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2212        pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2213        pause->rx_pause = pause->tx_pause;
2214}
2215
2216static int fec_enet_set_pauseparam(struct net_device *ndev,
2217                                   struct ethtool_pauseparam *pause)
2218{
2219        struct fec_enet_private *fep = netdev_priv(ndev);
2220
2221        if (!ndev->phydev)
2222                return -ENODEV;
2223
2224        if (pause->tx_pause != pause->rx_pause) {
2225                netdev_info(ndev,
2226                        "hardware only supports enabling/disabling tx and rx together");
2227                return -EINVAL;
2228        }
2229
2230        fep->pause_flag = 0;
2231
2232        /* tx pause must be same as rx pause */
2233        fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2234        fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2235
2236        if (pause->rx_pause || pause->autoneg) {
2237                ndev->phydev->supported |= SUPPORTED_Pause;
2238                ndev->phydev->advertising |= ADVERTISED_Pause;
2239        } else {
2240                ndev->phydev->supported &= ~SUPPORTED_Pause;
2241                ndev->phydev->advertising &= ~ADVERTISED_Pause;
2242        }
2243
2244        if (pause->autoneg) {
2245                if (netif_running(ndev))
2246                        fec_stop(ndev);
2247                phy_start_aneg(ndev->phydev);
2248        }
2249        if (netif_running(ndev)) {
2250                napi_disable(&fep->napi);
2251                netif_tx_lock_bh(ndev);
2252                fec_restart(ndev);
2253                netif_wake_queue(ndev);
2254                netif_tx_unlock_bh(ndev);
2255                napi_enable(&fep->napi);
2256        }
2257
2258        return 0;
2259}
2260
2261static const struct fec_stat {
2262        char name[ETH_GSTRING_LEN];
2263        u16 offset;
2264} fec_stats[] = {
2265        /* RMON TX */
2266        { "tx_dropped", RMON_T_DROP },
2267        { "tx_packets", RMON_T_PACKETS },
2268        { "tx_broadcast", RMON_T_BC_PKT },
2269        { "tx_multicast", RMON_T_MC_PKT },
2270        { "tx_crc_errors", RMON_T_CRC_ALIGN },
2271        { "tx_undersize", RMON_T_UNDERSIZE },
2272        { "tx_oversize", RMON_T_OVERSIZE },
2273        { "tx_fragment", RMON_T_FRAG },
2274        { "tx_jabber", RMON_T_JAB },
2275        { "tx_collision", RMON_T_COL },
2276        { "tx_64byte", RMON_T_P64 },
2277        { "tx_65to127byte", RMON_T_P65TO127 },
2278        { "tx_128to255byte", RMON_T_P128TO255 },
2279        { "tx_256to511byte", RMON_T_P256TO511 },
2280        { "tx_512to1023byte", RMON_T_P512TO1023 },
2281        { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2282        { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2283        { "tx_octets", RMON_T_OCTETS },
2284
2285        /* IEEE TX */
2286        { "IEEE_tx_drop", IEEE_T_DROP },
2287        { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2288        { "IEEE_tx_1col", IEEE_T_1COL },
2289        { "IEEE_tx_mcol", IEEE_T_MCOL },
2290        { "IEEE_tx_def", IEEE_T_DEF },
2291        { "IEEE_tx_lcol", IEEE_T_LCOL },
2292        { "IEEE_tx_excol", IEEE_T_EXCOL },
2293        { "IEEE_tx_macerr", IEEE_T_MACERR },
2294        { "IEEE_tx_cserr", IEEE_T_CSERR },
2295        { "IEEE_tx_sqe", IEEE_T_SQE },
2296        { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2297        { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2298
2299        /* RMON RX */
2300        { "rx_packets", RMON_R_PACKETS },
2301        { "rx_broadcast", RMON_R_BC_PKT },
2302        { "rx_multicast", RMON_R_MC_PKT },
2303        { "rx_crc_errors", RMON_R_CRC_ALIGN },
2304        { "rx_undersize", RMON_R_UNDERSIZE },
2305        { "rx_oversize", RMON_R_OVERSIZE },
2306        { "rx_fragment", RMON_R_FRAG },
2307        { "rx_jabber", RMON_R_JAB },
2308        { "rx_64byte", RMON_R_P64 },
2309        { "rx_65to127byte", RMON_R_P65TO127 },
2310        { "rx_128to255byte", RMON_R_P128TO255 },
2311        { "rx_256to511byte", RMON_R_P256TO511 },
2312        { "rx_512to1023byte", RMON_R_P512TO1023 },
2313        { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2314        { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2315        { "rx_octets", RMON_R_OCTETS },
2316
2317        /* IEEE RX */
2318        { "IEEE_rx_drop", IEEE_R_DROP },
2319        { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2320        { "IEEE_rx_crc", IEEE_R_CRC },
2321        { "IEEE_rx_align", IEEE_R_ALIGN },
2322        { "IEEE_rx_macerr", IEEE_R_MACERR },
2323        { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2324        { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2325};
2326
2327#define FEC_STATS_SIZE          (ARRAY_SIZE(fec_stats) * sizeof(u64))
2328
2329static void fec_enet_update_ethtool_stats(struct net_device *dev)
2330{
2331        struct fec_enet_private *fep = netdev_priv(dev);
2332        int i;
2333
2334        for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2335                fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2336}
2337
2338static void fec_enet_get_ethtool_stats(struct net_device *dev,
2339                                       struct ethtool_stats *stats, u64 *data)
2340{
2341        struct fec_enet_private *fep = netdev_priv(dev);
2342
2343        if (netif_running(dev))
2344                fec_enet_update_ethtool_stats(dev);
2345
2346        memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2347}
2348
2349static void fec_enet_get_strings(struct net_device *netdev,
2350        u32 stringset, u8 *data)
2351{
2352        int i;
2353        switch (stringset) {
2354        case ETH_SS_STATS:
2355                for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2356                        memcpy(data + i * ETH_GSTRING_LEN,
2357                                fec_stats[i].name, ETH_GSTRING_LEN);
2358                break;
2359        }
2360}
2361
2362static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2363{
2364        switch (sset) {
2365        case ETH_SS_STATS:
2366                return ARRAY_SIZE(fec_stats);
2367        default:
2368                return -EOPNOTSUPP;
2369        }
2370}
2371
2372static void fec_enet_clear_ethtool_stats(struct net_device *dev)
2373{
2374        struct fec_enet_private *fep = netdev_priv(dev);
2375        int i;
2376
2377        /* Disable MIB statistics counters */
2378        writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2379
2380        for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2381                writel(0, fep->hwp + fec_stats[i].offset);
2382
2383        /* Re-enable the MIB statistics counters by clearing the disable bit */
2384        writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2385}
2386
2387#else   /* !defined(CONFIG_M5272) */
2388#define FEC_STATS_SIZE  0
2389static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2390{
2391}
2392
2393static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
2394{
2395}
2396#endif /* !defined(CONFIG_M5272) */
2397
2398/* The ITR clock source is the enet system clock (clk_ahb).
2399 * One TCTT unit is 64 clock cycles (cycle_ns * 64 ns),
2400 * so the ICTT value = X us / (cycle_ns * 64).
2401 */
2402static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
2403{
2404        struct fec_enet_private *fep = netdev_priv(ndev);
2405
2406        return us * (fep->itr_clk_rate / 64000) / 1000;
2407}
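
    /* Illustration: with a 66 MHz AHB clock, one ICTT tick is 64 cycles
     * ~= 0.97 us, so the returned value tracks the requested microseconds
     * closely, e.g. 30 us -> 30 * (66000000 / 64000) / 1000 = 30 ticks.
     */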
2408
2409/* Set threshold for interrupt coalescing */
2410static void fec_enet_itr_coal_set(struct net_device *ndev)
2411{
2412        struct fec_enet_private *fep = netdev_priv(ndev);
2413        int rx_itr, tx_itr;
2414
2415        /* Must be greater than zero to avoid unpredictable behavior */
2416        if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2417            !fep->tx_time_itr || !fep->tx_pkts_itr)
2418                return;
2419
2420        /* Select enet system clock as Interrupt Coalescing
2421         * timer Clock Source
2422         */
2423        rx_itr = FEC_ITR_CLK_SEL;
2424        tx_itr = FEC_ITR_CLK_SEL;
2425
2426        /* set ICFT and ICTT */
2427        rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
2428        rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
2429        tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
2430        tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
2431
2432        rx_itr |= FEC_ITR_EN;
2433        tx_itr |= FEC_ITR_EN;
2434
2435        writel(tx_itr, fep->hwp + FEC_TXIC0);
2436        writel(rx_itr, fep->hwp + FEC_RXIC0);
2437        if (fep->quirks & FEC_QUIRK_HAS_AVB) {
2438                writel(tx_itr, fep->hwp + FEC_TXIC1);
2439                writel(rx_itr, fep->hwp + FEC_RXIC1);
2440                writel(tx_itr, fep->hwp + FEC_TXIC2);
2441                writel(rx_itr, fep->hwp + FEC_RXIC2);
2442        }
2443}
2444
2445static int
2446fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2447{
2448        struct fec_enet_private *fep = netdev_priv(ndev);
2449
2450        if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2451                return -EOPNOTSUPP;
2452
2453        ec->rx_coalesce_usecs = fep->rx_time_itr;
2454        ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
2455
2456        ec->tx_coalesce_usecs = fep->tx_time_itr;
2457        ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
2458
2459        return 0;
2460}
2461
2462static int
2463fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2464{
2465        struct fec_enet_private *fep = netdev_priv(ndev);
2466        unsigned int cycle;
2467
2468        if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2469                return -EOPNOTSUPP;
2470
2471        if (ec->rx_max_coalesced_frames > 255) {
2472                pr_err("Rx coalesced frames exceed hardware limitation\n");
2473                return -EINVAL;
2474        }
2475
2476        if (ec->tx_max_coalesced_frames > 255) {
2477                pr_err("Tx coalesced frames exceed hardware limitation\n");
2478                return -EINVAL;
2479        }
2480
2481        cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
2482        if (cycle > 0xFFFF) {
2483                pr_err("Rx coalesced usec exceeds hardware limitation\n");
2484                return -EINVAL;
2485        }
2486
2487        cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
2488        if (cycle > 0xFFFF) {
2489                pr_err("Tx coalesced usec exceeds hardware limitation\n");
2490                return -EINVAL;
2491        }
2492
2493        fep->rx_time_itr = ec->rx_coalesce_usecs;
2494        fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
2495
2496        fep->tx_time_itr = ec->tx_coalesce_usecs;
2497        fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
2498
2499        fec_enet_itr_coal_set(ndev);
2500
2501        return 0;
2502}
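
    /* From userspace these map onto the usual ethtool coalescing knobs,
     * e.g. "ethtool -C eth0 rx-usecs 30 rx-frames 16": an interrupt then
     * fires after 16 received frames or after 30 us, whichever comes first.
     */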
2503
2504static void fec_enet_itr_coal_init(struct net_device *ndev)
2505{
2506        struct ethtool_coalesce ec;
2507
2508        ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2509        ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2510
2511        ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
2512        ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
2513
2514        fec_enet_set_coalesce(ndev, &ec);
2515}
2516
2517static int fec_enet_get_tunable(struct net_device *netdev,
2518                                const struct ethtool_tunable *tuna,
2519                                void *data)
2520{
2521        struct fec_enet_private *fep = netdev_priv(netdev);
2522        int ret = 0;
2523
2524        switch (tuna->id) {
2525        case ETHTOOL_RX_COPYBREAK:
2526                *(u32 *)data = fep->rx_copybreak;
2527                break;
2528        default:
2529                ret = -EINVAL;
2530                break;
2531        }
2532
2533        return ret;
2534}
2535
2536static int fec_enet_set_tunable(struct net_device *netdev,
2537                                const struct ethtool_tunable *tuna,
2538                                const void *data)
2539{
2540        struct fec_enet_private *fep = netdev_priv(netdev);
2541        int ret = 0;
2542
2543        switch (tuna->id) {
2544        case ETHTOOL_RX_COPYBREAK:
2545                fep->rx_copybreak = *(u32 *)data;
2546                break;
2547        default:
2548                ret = -EINVAL;
2549                break;
2550        }
2551
2552        return ret;
2553}
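
    /* Userspace example: "ethtool --set-tunable eth0 rx-copybreak 256"
     * makes the RX path copy any frame of 256 bytes or less instead of
     * handing its DMA buffer upstream, and
     * "ethtool --get-tunable eth0 rx-copybreak" reads the value back.
     */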
2554
2555static void
2556fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2557{
2558        struct fec_enet_private *fep = netdev_priv(ndev);
2559
2560        if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2561                wol->supported = WAKE_MAGIC;
2562                wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2563        } else {
2564                wol->supported = wol->wolopts = 0;
2565        }
2566}
2567
2568static int
2569fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2570{
2571        struct fec_enet_private *fep = netdev_priv(ndev);
2572
2573        if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2574                return -EINVAL;
2575
2576        if (wol->wolopts & ~WAKE_MAGIC)
2577                return -EINVAL;
2578
2579        device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2580        if (device_may_wakeup(&ndev->dev)) {
2581                fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2582                if (fep->irq[0] > 0)
2583                        enable_irq_wake(fep->irq[0]);
2584        } else {
2585                fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2586                if (fep->irq[0] > 0)
2587                        disable_irq_wake(fep->irq[0]);
2588        }
2589
2590        return 0;
2591}
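
    /* Userspace example: "ethtool -s eth0 wol g" arms magic-packet wake-up
     * and "ethtool -s eth0 wol d" disarms it; the request only succeeds on
     * parts that set FEC_WOL_HAS_MAGIC_PACKET.
     */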
2592
2593static const struct ethtool_ops fec_enet_ethtool_ops = {
2594        .get_drvinfo            = fec_enet_get_drvinfo,
2595        .get_regs_len           = fec_enet_get_regs_len,
2596        .get_regs               = fec_enet_get_regs,
2597        .nway_reset             = phy_ethtool_nway_reset,
2598        .get_link               = ethtool_op_get_link,
2599        .get_coalesce           = fec_enet_get_coalesce,
2600        .set_coalesce           = fec_enet_set_coalesce,
2601#ifndef CONFIG_M5272
2602        .get_pauseparam         = fec_enet_get_pauseparam,
2603        .set_pauseparam         = fec_enet_set_pauseparam,
2604        .get_strings            = fec_enet_get_strings,
2605        .get_ethtool_stats      = fec_enet_get_ethtool_stats,
2606        .get_sset_count         = fec_enet_get_sset_count,
2607#endif
2608        .get_ts_info            = fec_enet_get_ts_info,
2609        .get_tunable            = fec_enet_get_tunable,
2610        .set_tunable            = fec_enet_set_tunable,
2611        .get_wol                = fec_enet_get_wol,
2612        .set_wol                = fec_enet_set_wol,
2613        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2614        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2615};
2616
2617static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2618{
2619        struct fec_enet_private *fep = netdev_priv(ndev);
2620        struct phy_device *phydev = ndev->phydev;
2621
2622        if (!netif_running(ndev))
2623                return -EINVAL;
2624
2625        if (!phydev)
2626                return -ENODEV;
2627
2628        if (fep->bufdesc_ex) {
2629                if (cmd == SIOCSHWTSTAMP)
2630                        return fec_ptp_set(ndev, rq);
2631                if (cmd == SIOCGHWTSTAMP)
2632                        return fec_ptp_get(ndev, rq);
2633        }
2634
2635        return phy_mii_ioctl(phydev, rq, cmd);
2636}
2637
2638static void fec_enet_free_buffers(struct net_device *ndev)
2639{
2640        struct fec_enet_private *fep = netdev_priv(ndev);
2641        unsigned int i;
2642        struct sk_buff *skb;
2643        struct bufdesc  *bdp;
2644        struct fec_enet_priv_tx_q *txq;
2645        struct fec_enet_priv_rx_q *rxq;
2646        unsigned int q;
2647
2648        for (q = 0; q < fep->num_rx_queues; q++) {
2649                rxq = fep->rx_queue[q];
2650                bdp = rxq->bd.base;
2651                for (i = 0; i < rxq->bd.ring_size; i++) {
2652                        skb = rxq->rx_skbuff[i];
2653                        rxq->rx_skbuff[i] = NULL;
2654                        if (skb) {
2655                                dma_unmap_single(&fep->pdev->dev,
2656                                                 fec32_to_cpu(bdp->cbd_bufaddr),
2657                                                 FEC_ENET_RX_FRSIZE - fep->rx_align,
2658                                                 DMA_FROM_DEVICE);
2659                                dev_kfree_skb(skb);
2660                        }
2661                        bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2662                }
2663        }
2664
2665        for (q = 0; q < fep->num_tx_queues; q++) {
2666                txq = fep->tx_queue[q];
2667                bdp = txq->bd.base;
2668                for (i = 0; i < txq->bd.ring_size; i++) {
2669                        kfree(txq->tx_bounce[i]);
2670                        txq->tx_bounce[i] = NULL;
2671                        skb = txq->tx_skbuff[i];
2672                        txq->tx_skbuff[i] = NULL;
2673                        dev_kfree_skb(skb);
2674                }
2675        }
2676}
2677
2678static void fec_enet_free_queue(struct net_device *ndev)
2679{
2680        struct fec_enet_private *fep = netdev_priv(ndev);
2681        int i;
2682        struct fec_enet_priv_tx_q *txq;
2683
2684        for (i = 0; i < fep->num_tx_queues; i++)
2685                if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
2686                        txq = fep->tx_queue[i];
2687                        dma_free_coherent(&fep->pdev->dev,
2688                                          txq->bd.ring_size * TSO_HEADER_SIZE,
2689                                          txq->tso_hdrs,
2690                                          txq->tso_hdrs_dma);
2691                }
2692
2693        for (i = 0; i < fep->num_rx_queues; i++)
2694                kfree(fep->rx_queue[i]);
2695        for (i = 0; i < fep->num_tx_queues; i++)
2696                kfree(fep->tx_queue[i]);
2697}
2698
2699static int fec_enet_alloc_queue(struct net_device *ndev)
2700{
2701        struct fec_enet_private *fep = netdev_priv(ndev);
2702        int i;
2703        int ret = 0;
2704        struct fec_enet_priv_tx_q *txq;
2705
2706        for (i = 0; i < fep->num_tx_queues; i++) {
2707                txq = kzalloc(sizeof(*txq), GFP_KERNEL);
2708                if (!txq) {
2709                        ret = -ENOMEM;
2710                        goto alloc_failed;
2711                }
2712
2713                fep->tx_queue[i] = txq;
2714                txq->bd.ring_size = TX_RING_SIZE;
2715                fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2716
2717                txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2718                txq->tx_wake_threshold =
2719                        (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2720
2721                txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
2722                                        txq->bd.ring_size * TSO_HEADER_SIZE,
2723                                        &txq->tso_hdrs_dma,
2724                                        GFP_KERNEL);
2725                if (!txq->tso_hdrs) {
2726                        ret = -ENOMEM;
2727                        goto alloc_failed;
2728                }
2729        }
2730
2731        for (i = 0; i < fep->num_rx_queues; i++) {
2732                fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
2733                                           GFP_KERNEL);
2734                if (!fep->rx_queue[i]) {
2735                        ret = -ENOMEM;
2736                        goto alloc_failed;
2737                }
2738
2739                fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
2740                fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2741        }
2742        return ret;
2743
2744alloc_failed:
2745        fec_enet_free_queue(ndev);
2746        return ret;
2747}
2748
2749static int
2750fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2751{
2752        struct fec_enet_private *fep = netdev_priv(ndev);
2753        unsigned int i;
2754        struct sk_buff *skb;
2755        struct bufdesc  *bdp;
2756        struct fec_enet_priv_rx_q *rxq;
2757
2758        rxq = fep->rx_queue[queue];
2759        bdp = rxq->bd.base;
2760        for (i = 0; i < rxq->bd.ring_size; i++) {
2761                skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2762                if (!skb)
2763                        goto err_alloc;
2764
2765                if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
2766                        dev_kfree_skb(skb);
2767                        goto err_alloc;
2768                }
2769
2770                rxq->rx_skbuff[i] = skb;
2771                bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2772
2773                if (fep->bufdesc_ex) {
2774                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2775                        ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2776                }
2777
2778                bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2779        }
2780
2781        /* Set the last buffer to wrap. */
2782        bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2783        bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2784        return 0;
2785
2786 err_alloc:
2787        fec_enet_free_buffers(ndev);
2788        return -ENOMEM;
2789}
2790
2791static int
2792fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2793{
2794        struct fec_enet_private *fep = netdev_priv(ndev);
2795        unsigned int i;
2796        struct bufdesc  *bdp;
2797        struct fec_enet_priv_tx_q *txq;
2798
2799        txq = fep->tx_queue[queue];
2800        bdp = txq->bd.base;
2801        for (i = 0; i < txq->bd.ring_size; i++) {
2802                txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2803                if (!txq->tx_bounce[i])
2804                        goto err_alloc;
2805
2806                bdp->cbd_sc = cpu_to_fec16(0);
2807                bdp->cbd_bufaddr = cpu_to_fec32(0);
2808
2809                if (fep->bufdesc_ex) {
2810                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2811                        ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2812                }
2813
2814                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2815        }
2816
2817        /* Set the last buffer to wrap. */
2818        bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2819        bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2820
2821        return 0;
2822
2823 err_alloc:
2824        fec_enet_free_buffers(ndev);
2825        return -ENOMEM;
2826}
2827
2828static int fec_enet_alloc_buffers(struct net_device *ndev)
2829{
2830        struct fec_enet_private *fep = netdev_priv(ndev);
2831        unsigned int i;
2832
2833        for (i = 0; i < fep->num_rx_queues; i++)
2834                if (fec_enet_alloc_rxq_buffers(ndev, i))
2835                        return -ENOMEM;
2836
2837        for (i = 0; i < fep->num_tx_queues; i++)
2838                if (fec_enet_alloc_txq_buffers(ndev, i))
2839                        return -ENOMEM;
2840        return 0;
2841}
2842
2843static int
2844fec_enet_open(struct net_device *ndev)
2845{
2846        struct fec_enet_private *fep = netdev_priv(ndev);
2847        int ret;
2848        bool reset_again;
2849
2850        ret = pm_runtime_get_sync(&fep->pdev->dev);
2851        if (ret < 0) {
2852                pm_runtime_put_noidle(&fep->pdev->dev);
2853                return ret;
2854        }
2853
2854        pinctrl_pm_select_default_state(&fep->pdev->dev);
2855        ret = fec_enet_clk_enable(ndev, true);
2856        if (ret)
2857                goto clk_enable;
2858
2859        /* During the first fec_enet_open call the PHY isn't probed at this
2860         * point. Therefore the phy_reset_after_clk_enable() call within
2861         * fec_enet_clk_enable() fails. As we need this reset in order to be
2862         * sure the PHY is working correctly we check if we need to reset again
2863         * later when the PHY is probed
2864         * later when the PHY is probed.
2865        if (ndev->phydev && ndev->phydev->drv)
2866                reset_again = false;
2867        else
2868                reset_again = true;
2869
2870        /* I should reset the ring buffers here, but I don't yet know
2871         * a simple way to do that.
2872         */
2873
2874        ret = fec_enet_alloc_buffers(ndev);
2875        if (ret)
2876                goto err_enet_alloc;
2877
2878        /* Init MAC prior to mii bus probe */
2879        fec_restart(ndev);
2880
2881        /* Probe and connect to PHY when open the interface */
2882        ret = fec_enet_mii_probe(ndev);
2883        if (ret)
2884                goto err_enet_mii_probe;
2885
2886        /* Call phy_reset_after_clk_enable() again if the earlier call from
2887         * fec_enet_clk_enable() failed because the PHY wasn't probed yet.
2888         */
2889        if (reset_again)
2890                phy_reset_after_clk_enable(ndev->phydev);
2891
2892        if (fep->quirks & FEC_QUIRK_ERR006687)
2893                imx6q_cpuidle_fec_irqs_used();
2894
2895        napi_enable(&fep->napi);
2896        phy_start(ndev->phydev);
2897        netif_tx_start_all_queues(ndev);
2898
2899        device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2900                                 FEC_WOL_FLAG_ENABLE);
2901
2902        return 0;
2903
2904err_enet_mii_probe:
2905        fec_enet_free_buffers(ndev);
2906err_enet_alloc:
2907        fec_enet_clk_enable(ndev, false);
2908clk_enable:
2909        pm_runtime_mark_last_busy(&fep->pdev->dev);
2910        pm_runtime_put_autosuspend(&fep->pdev->dev);
2911        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2912        return ret;
2913}
2914
2915static int
2916fec_enet_close(struct net_device *ndev)
2917{
2918        struct fec_enet_private *fep = netdev_priv(ndev);
2919
2920        phy_stop(ndev->phydev);
2921
2922        if (netif_device_present(ndev)) {
2923                napi_disable(&fep->napi);
2924                netif_tx_disable(ndev);
2925                fec_stop(ndev);
2926        }
2927
2928        phy_disconnect(ndev->phydev);
2929
2930        if (fep->quirks & FEC_QUIRK_ERR006687)
2931                imx6q_cpuidle_fec_irqs_unused();
2932
2933        fec_enet_update_ethtool_stats(ndev);
2934
2935        fec_enet_clk_enable(ndev, false);
2936        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2937        pm_runtime_mark_last_busy(&fep->pdev->dev);
2938        pm_runtime_put_autosuspend(&fep->pdev->dev);
2939
2940        fec_enet_free_buffers(ndev);
2941
2942        return 0;
2943}
2944
2945/* Set or clear the multicast filter for this adaptor.
2946 * Skeleton taken from sunlance driver.
2947 * The CPM Ethernet implementation allows Multicast as well as individual
2948 * MAC address filtering.  Some of the drivers check to make sure it is
2949 * a group multicast address, and discard those that are not.  I guess I
2950 * will do the same for now, but just remove the test if you want
2951 * individual filtering as well (do the upper net layers want or support
2952 * this kind of feature?).
2953 */
2954
2955#define FEC_HASH_BITS   6               /* #bits in hash */
2956#define CRC32_POLY      0xEDB88320
2957
2958static void set_multicast_list(struct net_device *ndev)
2959{
2960        struct fec_enet_private *fep = netdev_priv(ndev);
2961        struct netdev_hw_addr *ha;
2962        unsigned int i, bit, data, crc, tmp;
2963        unsigned char hash;
2964        unsigned int hash_high = 0, hash_low = 0;
2965
2966        if (ndev->flags & IFF_PROMISC) {
2967                tmp = readl(fep->hwp + FEC_R_CNTRL);
2968                tmp |= 0x8;
2969                writel(tmp, fep->hwp + FEC_R_CNTRL);
2970                return;
2971        }
2972
2973        tmp = readl(fep->hwp + FEC_R_CNTRL);
2974        tmp &= ~0x8;
2975        writel(tmp, fep->hwp + FEC_R_CNTRL);
2976
2977        if (ndev->flags & IFF_ALLMULTI) {
2978                /* Catch all multicast addresses, so set the
2979                 * filter to all 1's
2980                 */
2981                writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2982                writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2983
2984                return;
2985        }
2986
2987        /* Add the addresses in hash register */
2988        netdev_for_each_mc_addr(ha, ndev) {
2989                /* calculate crc32 value of mac address */
2990                crc = 0xffffffff;
2991
2992                for (i = 0; i < ndev->addr_len; i++) {
2993                        data = ha->addr[i];
2994                        for (bit = 0; bit < 8; bit++, data >>= 1) {
2995                                crc = (crc >> 1) ^
2996                                (((crc ^ data) & 1) ? CRC32_POLY : 0);
2997                        }
2998                }
2999
3000                /* Only the upper 6 bits (FEC_HASH_BITS) are used;
3001                 * they select a specific bit in the hash registers.
3002                 */
3003                hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3004
3005                if (hash > 31)
3006                        hash_high |= 1 << (hash - 32);
3007                else
3008                        hash_low |= 1 << hash;
3009        }
3010
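        /* Illustrative mapping (hypothetical hash values, not taken from real
         * addresses): an upper-6-bit hash of 38 would set bit (38 - 32) = 6
         * in GRP_HASH_TABLE_HIGH, while a hash of 5 would set bit 5 in
         * GRP_HASH_TABLE_LOW.
         */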
3011        writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3012        writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3013}
3014
3015/* Program a (possibly new) MAC address into the hardware. */
3016static int
3017fec_set_mac_address(struct net_device *ndev, void *p)
3018{
3019        struct fec_enet_private *fep = netdev_priv(ndev);
3020        struct sockaddr *addr = p;
3021
3022        if (addr) {
3023                if (!is_valid_ether_addr(addr->sa_data))
3024                        return -EADDRNOTAVAIL;
3025                memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3026        }
3027
3028        /* Check the netif status here to avoid a system hang in this case:
3029         * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3030         * After ethx is downed, all FEC clocks are gated off; any register
3031         * access would then hang the system.
3032         */
3033        if (!netif_running(ndev))
3034                return 0;
3035
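        /* The address registers pack the MAC most-significant byte first. As
         * an illustrative example (not a real assigned address), for
         * 12:34:56:78:9a:bc this writes 0x12345678 to FEC_ADDR_LOW and
         * 0x9abc0000 to FEC_ADDR_HIGH.
         */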
3036        writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3037                (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3038                fep->hwp + FEC_ADDR_LOW);
3039        writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3040                fep->hwp + FEC_ADDR_HIGH);
3041        return 0;
3042}
3043
3044#ifdef CONFIG_NET_POLL_CONTROLLER
3045/**
3046 * fec_poll_controller - FEC Poll controller function
3047 * @dev: The FEC network adapter
3048 *
3049 * Polled functionality used by netconsole and others in non-interrupt mode
3050 *
3051 */
3052static void fec_poll_controller(struct net_device *dev)
3053{
3054        int i;
3055        struct fec_enet_private *fep = netdev_priv(dev);
3056
3057        for (i = 0; i < FEC_IRQ_NUM; i++) {
3058                if (fep->irq[i] > 0) {
3059                        disable_irq(fep->irq[i]);
3060                        fec_enet_interrupt(fep->irq[i], dev);
3061                        enable_irq(fep->irq[i]);
3062                }
3063        }
3064}
3065#endif
3066
3067static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3068        netdev_features_t features)
3069{
3070        struct fec_enet_private *fep = netdev_priv(netdev);
3071        netdev_features_t changed = features ^ netdev->features;
3072
3073        netdev->features = features;
3074
3075        /* Receive checksum has been changed */
3076        if (changed & NETIF_F_RXCSUM) {
3077                if (features & NETIF_F_RXCSUM)
3078                        fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3079                else
3080                        fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3081        }
3082}
3083
3084static int fec_set_features(struct net_device *netdev,
3085        netdev_features_t features)
3086{
3087        struct fec_enet_private *fep = netdev_priv(netdev);
3088        netdev_features_t changed = features ^ netdev->features;
3089
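        /* Toggling RXCSUM while the interface is up requires reprogramming
         * the MAC, so quiesce NAPI and TX, stop the MAC, apply the change and
         * restart it; otherwise the feature flags can be updated directly.
         */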
3090        if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3091                napi_disable(&fep->napi);
3092                netif_tx_lock_bh(netdev);
3093                fec_stop(netdev);
3094                fec_enet_set_netdev_features(netdev, features);
3095                fec_restart(netdev);
3096                netif_tx_wake_all_queues(netdev);
3097                netif_tx_unlock_bh(netdev);
3098                napi_enable(&fep->napi);
3099        } else {
3100                fec_enet_set_netdev_features(netdev, features);
3101        }
3102
3103        return 0;
3104}
3105
3106static const struct net_device_ops fec_netdev_ops = {
3107        .ndo_open               = fec_enet_open,
3108        .ndo_stop               = fec_enet_close,
3109        .ndo_start_xmit         = fec_enet_start_xmit,
3110        .ndo_set_rx_mode        = set_multicast_list,
3111        .ndo_validate_addr      = eth_validate_addr,
3112        .ndo_tx_timeout         = fec_timeout,
3113        .ndo_set_mac_address    = fec_set_mac_address,
3114        .ndo_do_ioctl           = fec_enet_ioctl,
3115#ifdef CONFIG_NET_POLL_CONTROLLER
3116        .ndo_poll_controller    = fec_poll_controller,
3117#endif
3118        .ndo_set_features       = fec_set_features,
3119};
3120
3121static const unsigned short offset_des_active_rxq[] = {
3122        FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
3123};
3124
3125static const unsigned short offset_des_active_txq[] = {
3126        FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
3127};
3128
3129 /*
3130  * XXX:  We need to clean up on failure exits here.
3131  *
3132  */
3133static int fec_enet_init(struct net_device *ndev)
3134{
3135        struct fec_enet_private *fep = netdev_priv(ndev);
3136        struct bufdesc *cbd_base;
3137        dma_addr_t bd_dma;
3138        int bd_size;
3139        unsigned int i;
3140        unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
3141                        sizeof(struct bufdesc);
3142        unsigned dsize_log2 = __fls(dsize);
3143
3144        WARN_ON(dsize != (1 << dsize_log2));
3145#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3146        fep->rx_align = 0xf;
3147        fep->tx_align = 0xf;
3148#else
3149        fep->rx_align = 0x3;
3150        fep->tx_align = 0x3;
3151#endif
3152
3153        fec_enet_alloc_queue(ndev);
3154
3155        bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
3156
3157        /* Allocate memory for buffer descriptors. */
3158        cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3159                                       GFP_KERNEL);
3160        if (!cbd_base)
3161                return -ENOMEM;
3163
3164        memset(cbd_base, 0, bd_size);
3165
3166        /* Get the Ethernet address */
3167        fec_get_mac(ndev);
3168        /* make sure the MAC address we just acquired is programmed into the hw */
3169        fec_set_mac_address(ndev, NULL);
3170
3171        /* Set receive and transmit descriptor base. */
3172        for (i = 0; i < fep->num_rx_queues; i++) {
3173                struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
3174                unsigned size = dsize * rxq->bd.ring_size;
3175
3176                rxq->bd.qid = i;
3177                rxq->bd.base = cbd_base;
3178                rxq->bd.cur = cbd_base;
3179                rxq->bd.dma = bd_dma;
3180                rxq->bd.dsize = dsize;
3181                rxq->bd.dsize_log2 = dsize_log2;
3182                rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
3183                bd_dma += size;
3184                cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3185                rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3186        }
3187
3188        for (i = 0; i < fep->num_tx_queues; i++) {
3189                struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
3190                unsigned size = dsize * txq->bd.ring_size;
3191
3192                txq->bd.qid = i;
3193                txq->bd.base = cbd_base;
3194                txq->bd.cur = cbd_base;
3195                txq->bd.dma = bd_dma;
3196                txq->bd.dsize = dsize;
3197                txq->bd.dsize_log2 = dsize_log2;
3198                txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
3199                bd_dma += size;
3200                cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
3201                txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3202        }
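
        /* After the two loops above, the single coherent allocation holds all
         * descriptor rings back to back:
         *
         *   [ rxq0 | rxq1 | ... | txq0 | txq1 | ... ]
         *
         * Each ring's bd.last points at its final descriptor, which receives
         * the wrap bit once buffers are attached.
         */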
3203
3205        /* The FEC Ethernet specific entries in the device structure */
3206        ndev->watchdog_timeo = TX_TIMEOUT;
3207        ndev->netdev_ops = &fec_netdev_ops;
3208        ndev->ethtool_ops = &fec_enet_ethtool_ops;
3209
3210        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
3211        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3212
3213        if (fep->quirks & FEC_QUIRK_HAS_VLAN)
3214                /* enable hw VLAN support */
3215                ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3216
3217        if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
3218                ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
3219
3220                /* enable hw accelerator */
3221                ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
3222                                | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
3223                fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3224        }
3225
3226        if (fep->quirks & FEC_QUIRK_HAS_AVB) {
3227                fep->tx_align = 0;
3228                fep->rx_align = 0x3f;
3229        }
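        /* AVB-capable FECs (e.g. i.MX6SX) have no TX alignment restriction
         * but want RX buffers aligned to 64 bytes, hence the overrides above.
         */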
3230
3231        ndev->hw_features = ndev->features;
3232
3233        fec_restart(ndev);
3234
3235        if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
3236                fec_enet_clear_ethtool_stats(ndev);
3237        else
3238                fec_enet_update_ethtool_stats(ndev);
3239
3240        return 0;
3241}
3242
3243#ifdef CONFIG_OF
3244static int fec_reset_phy(struct platform_device *pdev)
3245{
3246        int err, phy_reset;
3247        bool active_high = false;
3248        int msec = 1, phy_post_delay = 0;
3249        struct device_node *np = pdev->dev.of_node;
3250
3251        if (!np)
3252                return 0;
3253
3254        err = of_property_read_u32(np, "phy-reset-duration", &msec);
3255        /* A sane reset duration should not be longer than 1s */
3256        if (!err && msec > 1000)
3257                msec = 1;
3258
3259        phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
3260        if (phy_reset == -EPROBE_DEFER)
3261                return phy_reset;
3262        else if (!gpio_is_valid(phy_reset))
3263                return 0;
3264
3265        err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
3266        /* A valid post-reset delay should not be longer than 1s */
3267        if (!err && phy_post_delay > 1000)
3268                return -EINVAL;
3269
3270        active_high = of_property_read_bool(np, "phy-reset-active-high");
3271
3272        err = devm_gpio_request_one(&pdev->dev, phy_reset,
3273                        active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
3274                        "phy-reset");
3275        if (err) {
3276                dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
3277                return err;
3278        }
3279
3280        if (msec > 20)
3281                msleep(msec);
3282        else
3283                usleep_range(msec * 1000, msec * 1000 + 1000);
3284
3285        gpio_set_value_cansleep(phy_reset, !active_high);
3286
3287        if (!phy_post_delay)
3288                return 0;
3289
3290        if (phy_post_delay > 20)
3291                msleep(phy_post_delay);
3292        else
3293                usleep_range(phy_post_delay * 1000,
3294                             phy_post_delay * 1000 + 1000);
3295
3296        return 0;
3297}
3298#else /* CONFIG_OF */
3299static int fec_reset_phy(struct platform_device *pdev)
3300{
3301        /*
3302         * In the non-DT (platform data) probe case, the reset has already
3303         * been done by the board/machine code.
3304         */
3305        return 0;
3306}
3307#endif /* CONFIG_OF */
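
/* A minimal, hypothetical device-tree fragment exercising the reset
 * properties parsed by fec_reset_phy() (pin and values are illustrative
 * only):
 *
 *     &fec {
 *             phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *             phy-reset-duration = <10>;      // ms, clamped to 1 if > 1000
 *             phy-reset-post-delay = <1>;     // ms, rejected if > 1000
 *     };
 */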
3308
3309static void
3310fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
3311{
3312        struct device_node *np = pdev->dev.of_node;
3313
3314        *num_tx = *num_rx = 1;
3315
3316        if (!np || !of_device_is_available(np))
3317                return;
3318
3319        /* parse the num of tx and rx queues */
3320        of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3321
3322        of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3323
3324        if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3325                dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
3326                         *num_tx);
3327                *num_tx = 1;
3328                return;
3329        }
3330
3331        if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3332                dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
3333                         *num_rx);
3334                *num_rx = 1;
3335                return;
3336        }
3338}
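
/* Hypothetical device-tree fragment for the queue-count properties parsed
 * above (values are illustrative; anything absent or out of range falls
 * back to 1):
 *
 *     &fec {
 *             fsl,num-tx-queues = <3>;
 *             fsl,num-rx-queues = <3>;
 *     };
 */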
3339
3340static int fec_enet_get_irq_cnt(struct platform_device *pdev)
3341{
3342        int irq_cnt = platform_irq_count(pdev);
3343
3344        if (irq_cnt > FEC_IRQ_NUM)
3345                irq_cnt = FEC_IRQ_NUM;  /* the last one is reserved for pps */
3346        else if (irq_cnt == 2)
3347                irq_cnt = 1;    /* the last one is reserved for pps */
3348        else if (irq_cnt <= 0)
3349                irq_cnt = 1;    /* at least one irq is needed */
3350        return irq_cnt;
3351}
3352
3353static int
3354fec_probe(struct platform_device *pdev)
3355{
3356        struct fec_enet_private *fep;
3357        struct fec_platform_data *pdata;
3358        struct net_device *ndev;
3359        int i, irq, ret = 0;
3360        struct resource *r;
3361        const struct of_device_id *of_id;
3362        static int dev_id;
3363        struct device_node *np = pdev->dev.of_node, *phy_node;
3364        int num_tx_qs;
3365        int num_rx_qs;
3366        char irq_name[8];
3367        int irq_cnt;
3368
3369        fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
3370
3371        /* Init network device */
3372        ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
3373                                  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
3374        if (!ndev)
3375                return -ENOMEM;
3376
3377        SET_NETDEV_DEV(ndev, &pdev->dev);
3378
3379        /* setup board info structure */
3380        fep = netdev_priv(ndev);
3381
3382        of_id = of_match_device(fec_dt_ids, &pdev->dev);
3383        if (of_id)
3384                pdev->id_entry = of_id->data;
3385        fep->quirks = pdev->id_entry->driver_data;
3386
3387        fep->netdev = ndev;
3388        fep->num_rx_queues = num_rx_qs;
3389        fep->num_tx_queues = num_tx_qs;
3390
3391#if !defined(CONFIG_M5272)
3392        /* default enable pause frame auto negotiation */
3393        if (fep->quirks & FEC_QUIRK_HAS_GBIT)
3394                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
3395#endif
3396
3397        /* Select default pin state */
3398        pinctrl_pm_select_default_state(&pdev->dev);
3399
3400        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3401        fep->hwp = devm_ioremap_resource(&pdev->dev, r);
3402        if (IS_ERR(fep->hwp)) {
3403                ret = PTR_ERR(fep->hwp);
3404                goto failed_ioremap;
3405        }
3406
3407        fep->pdev = pdev;
3408        fep->dev_id = dev_id++;
3409
3410        platform_set_drvdata(pdev, ndev);
3411
3412        if ((of_machine_is_compatible("fsl,imx6q") ||
3413             of_machine_is_compatible("fsl,imx6dl")) &&
3414            !of_property_read_bool(np, "fsl,err006687-workaround-present"))
3415                fep->quirks |= FEC_QUIRK_ERR006687;
3416
3417        if (of_get_property(np, "fsl,magic-packet", NULL))
3418                fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3419
3420        phy_node = of_parse_phandle(np, "phy-handle", 0);
3421        if (!phy_node && of_phy_is_fixed_link(np)) {
3422                ret = of_phy_register_fixed_link(np);
3423                if (ret < 0) {
3424                        dev_err(&pdev->dev,
3425                                "broken fixed-link specification\n");
3426                        goto failed_phy;
3427                }
3428                phy_node = of_node_get(np);
3429        }
3430        fep->phy_node = phy_node;
3431
3432        ret = of_get_phy_mode(pdev->dev.of_node);
3433        if (ret < 0) {
3434                pdata = dev_get_platdata(&pdev->dev);
3435                if (pdata)
3436                        fep->phy_interface = pdata->phy;
3437                else
3438                        fep->phy_interface = PHY_INTERFACE_MODE_MII;
3439        } else {
3440                fep->phy_interface = ret;
3441        }
3442
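        /* Clock roles, as used below: "ipg" gates register access, "ahb"
         * feeds the bus/DMA, "enet_out" optionally clocks the PHY,
         * "enet_clk_ref" is an optional reference clock, and "ptp" is
         * optional; without it the extended (timestamping) buffer
         * descriptors are disabled.
         */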
3443        fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
3444        if (IS_ERR(fep->clk_ipg)) {
3445                ret = PTR_ERR(fep->clk_ipg);
3446                goto failed_clk;
3447        }
3448
3449        fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
3450        if (IS_ERR(fep->clk_ahb)) {
3451                ret = PTR_ERR(fep->clk_ahb);
3452                goto failed_clk;
3453        }
3454
3455        fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
3456
3457        /* enet_out is optional, depends on board */
3458        fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
3459        if (IS_ERR(fep->clk_enet_out))
3460                fep->clk_enet_out = NULL;
3461
3462        fep->ptp_clk_on = false;
3463        mutex_init(&fep->ptp_clk_mutex);
3464
3465        /* clk_ref is optional, depends on board */
3466        fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
3467        if (IS_ERR(fep->clk_ref))
3468                fep->clk_ref = NULL;
3469
3470        fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
3471        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
3472        if (IS_ERR(fep->clk_ptp)) {
3473                fep->clk_ptp = NULL;
3474                fep->bufdesc_ex = false;
3475        }
3476
3477        ret = fec_enet_clk_enable(ndev, true);
3478        if (ret)
3479                goto failed_clk;
3480
3481        ret = clk_prepare_enable(fep->clk_ipg);
3482        if (ret)
3483                goto failed_clk_ipg;
3484
3485        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3486        if (!IS_ERR(fep->reg_phy)) {
3487                ret = regulator_enable(fep->reg_phy);
3488                if (ret) {
3489                        dev_err(&pdev->dev,
3490                                "Failed to enable phy regulator: %d\n", ret);
3491                        clk_disable_unprepare(fep->clk_ipg);
3492                        goto failed_regulator;
3493                }
3494        } else {
3495                if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3496                        ret = -EPROBE_DEFER;
3497                        goto failed_regulator;
3498                }
3499                fep->reg_phy = NULL;
3500        }
3501
3502        pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3503        pm_runtime_use_autosuspend(&pdev->dev);
3504        pm_runtime_get_noresume(&pdev->dev);
3505        pm_runtime_set_active(&pdev->dev);
3506        pm_runtime_enable(&pdev->dev);
3507
3508        ret = fec_reset_phy(pdev);
3509        if (ret)
3510                goto failed_reset;
3511
3512        irq_cnt = fec_enet_get_irq_cnt(pdev);
3513        if (fep->bufdesc_ex)
3514                fec_ptp_init(pdev, irq_cnt);
3515
3516        ret = fec_enet_init(ndev);
3517        if (ret)
3518                goto failed_init;
3519
3520        for (i = 0; i < irq_cnt; i++) {
3521                snprintf(irq_name, sizeof(irq_name), "int%d", i);
3522                irq = platform_get_irq_byname(pdev, irq_name);
3523                if (irq < 0)
3524                        irq = platform_get_irq(pdev, i);
3525                if (irq < 0) {
3526                        ret = irq;
3527                        goto failed_irq;
3528                }
3529                ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
3530                                       0, pdev->name, ndev);
3531                if (ret)
3532                        goto failed_irq;
3533
3534                fep->irq[i] = irq;
3535        }
3536
3537        init_completion(&fep->mdio_done);
3538        ret = fec_enet_mii_init(pdev);
3539        if (ret)
3540                goto failed_mii_init;
3541
3542        /* Carrier starts down, phylib will bring it up */
3543        netif_carrier_off(ndev);
3544        fec_enet_clk_enable(ndev, false);
3545        pinctrl_pm_select_sleep_state(&pdev->dev);
3546
3547        ret = register_netdev(ndev);
3548        if (ret)
3549                goto failed_register;
3550
3551        device_init_wakeup(&ndev->dev, fep->wol_flag &
3552                           FEC_WOL_HAS_MAGIC_PACKET);
3553
3554        if (fep->bufdesc_ex && fep->ptp_clock)
3555                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3556
3557        fep->rx_copybreak = COPYBREAK_DEFAULT;
3558        INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3559
3560        pm_runtime_mark_last_busy(&pdev->dev);
3561        pm_runtime_put_autosuspend(&pdev->dev);
3562
3563        return 0;
3564
3565failed_register:
3566        fec_enet_mii_remove(fep);
3567failed_mii_init:
3568failed_irq:
3569failed_init:
3570        fec_ptp_stop(pdev);
3571        if (fep->reg_phy)
3572                regulator_disable(fep->reg_phy);
3573failed_reset:
3574        pm_runtime_put(&pdev->dev);
3575        pm_runtime_disable(&pdev->dev);
3576failed_regulator:
3577failed_clk_ipg:
3578        fec_enet_clk_enable(ndev, false);
3579failed_clk:
3580        if (of_phy_is_fixed_link(np))
3581                of_phy_deregister_fixed_link(np);
3582        of_node_put(phy_node);
3583failed_phy:
3584        dev_id--;
3585failed_ioremap:
3586        free_netdev(ndev);
3587
3588        return ret;
3589}
3590
3591static int
3592fec_drv_remove(struct platform_device *pdev)
3593{
3594        struct net_device *ndev = platform_get_drvdata(pdev);
3595        struct fec_enet_private *fep = netdev_priv(ndev);
3596        struct device_node *np = pdev->dev.of_node;
3597
3598        cancel_work_sync(&fep->tx_timeout_work);
3599        fec_ptp_stop(pdev);
3600        unregister_netdev(ndev);
3601        fec_enet_mii_remove(fep);
3602        if (fep->reg_phy)
3603                regulator_disable(fep->reg_phy);
3604        pm_runtime_put(&pdev->dev);
3605        pm_runtime_disable(&pdev->dev);
3606        if (of_phy_is_fixed_link(np))
3607                of_phy_deregister_fixed_link(np);
3608        of_node_put(fep->phy_node);
3609        free_netdev(ndev);
3610
3611        return 0;
3612}
3613
3614static int __maybe_unused fec_suspend(struct device *dev)
3615{
3616        struct net_device *ndev = dev_get_drvdata(dev);
3617        struct fec_enet_private *fep = netdev_priv(ndev);
3618
3619        rtnl_lock();
3620        if (netif_running(ndev)) {
3621                if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3622                        fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3623                phy_stop(ndev->phydev);
3624                napi_disable(&fep->napi);
3625                netif_tx_lock_bh(ndev);
3626                netif_device_detach(ndev);
3627                netif_tx_unlock_bh(ndev);
3628                fec_stop(ndev);
3629                fec_enet_clk_enable(ndev, false);
3630                if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3631                        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3632        }
3633        rtnl_unlock();
3634
3635        if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3636                regulator_disable(fep->reg_phy);
3637
3638        /* The SoC supplies the clock to the PHY: when that clock is
3639         * disabled, the link goes down. Likewise, when the SoC controls the
3640         * PHY regulator and the regulator is disabled, the link goes down.
3641         */
3641        if (fep->clk_enet_out || fep->reg_phy)
3642                fep->link = 0;
3643
3644        return 0;
3645}
3646
3647static int __maybe_unused fec_resume(struct device *dev)
3648{
3649        struct net_device *ndev = dev_get_drvdata(dev);
3650        struct fec_enet_private *fep = netdev_priv(ndev);
3651        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
3652        int ret;
3653        int val;
3654
3655        if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3656                ret = regulator_enable(fep->reg_phy);
3657                if (ret)
3658                        return ret;
3659        }
3660
3661        rtnl_lock();
3662        if (netif_running(ndev)) {
3663                ret = fec_enet_clk_enable(ndev, true);
3664                if (ret) {
3665                        rtnl_unlock();
3666                        goto failed_clk;
3667                }
3668                if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3669                        if (pdata && pdata->sleep_mode_enable)
3670                                pdata->sleep_mode_enable(false);
3671                        val = readl(fep->hwp + FEC_ECNTRL);
3672                        val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
3673                        writel(val, fep->hwp + FEC_ECNTRL);
3674                        fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3675                } else {
3676                        pinctrl_pm_select_default_state(&fep->pdev->dev);
3677                }
3678                fec_restart(ndev);
3679                netif_tx_lock_bh(ndev);
3680                netif_device_attach(ndev);
3681                netif_tx_unlock_bh(ndev);
3682                napi_enable(&fep->napi);
3683                phy_start(ndev->phydev);
3684        }
3685        rtnl_unlock();
3686
3687        return 0;
3688
3689failed_clk:
3690        if (fep->reg_phy)
3691                regulator_disable(fep->reg_phy);
3692        return ret;
3693}
3694
3695static int __maybe_unused fec_runtime_suspend(struct device *dev)
3696{
3697        struct net_device *ndev = dev_get_drvdata(dev);
3698        struct fec_enet_private *fep = netdev_priv(ndev);
3699
3700        clk_disable_unprepare(fep->clk_ipg);
3701
3702        return 0;
3703}
3704
3705static int __maybe_unused fec_runtime_resume(struct device *dev)
3706{
3707        struct net_device *ndev = dev_get_drvdata(dev);
3708        struct fec_enet_private *fep = netdev_priv(ndev);
3709
3710        return clk_prepare_enable(fep->clk_ipg);
3711}
3712
3713static const struct dev_pm_ops fec_pm_ops = {
3714        SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3715        SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3716};
3717
3718static struct platform_driver fec_driver = {
3719        .driver = {
3720                .name   = DRIVER_NAME,
3721                .pm     = &fec_pm_ops,
3722                .of_match_table = fec_dt_ids,
3723        },
3724        .id_table = fec_devtype,
3725        .probe  = fec_probe,
3726        .remove = fec_drv_remove,
3727};
3728
3729module_platform_driver(fec_driver);
3730
3731MODULE_ALIAS("platform:"DRIVER_NAME);
3732MODULE_LICENSE("GPL");
3733