linux/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
   1/*
   2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
   3 *
   4 * Copyright (c) 2003 Intracom S.A.
   5 *  by Pantelis Antoniou <panto@intracom.gr>
   6 *
   7 * 2005 (c) MontaVista Software, Inc.
   8 * Vitaly Bordug <vbordug@ru.mvista.com>
   9 *
  10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
  11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
  12 *
  13 * This file is licensed under the terms of the GNU General Public License
  14 * version 2. This program is licensed "as is" without any warranty of any
  15 * kind, whether express or implied.
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/kernel.h>
  20#include <linux/types.h>
  21#include <linux/string.h>
  22#include <linux/ptrace.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/slab.h>
  26#include <linux/interrupt.h>
  27#include <linux/delay.h>
  28#include <linux/netdevice.h>
  29#include <linux/etherdevice.h>
  30#include <linux/skbuff.h>
  31#include <linux/spinlock.h>
  32#include <linux/mii.h>
  33#include <linux/ethtool.h>
  34#include <linux/bitops.h>
  35#include <linux/fs.h>
  36#include <linux/platform_device.h>
  37#include <linux/phy.h>
  38#include <linux/of.h>
  39#include <linux/of_mdio.h>
  40#include <linux/of_platform.h>
  41#include <linux/of_gpio.h>
  42#include <linux/of_net.h>
  43
  44#include <linux/vmalloc.h>
  45#include <asm/pgtable.h>
  46#include <asm/irq.h>
  47#include <asm/uaccess.h>
  48
  49#include "fs_enet.h"
  50
  51/*************************************************/
  52
  53MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
  54MODULE_DESCRIPTION("Freescale Ethernet Driver");
  55MODULE_LICENSE("GPL");
  56MODULE_VERSION(DRV_MODULE_VERSION);
  57
  58static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
  59module_param(fs_enet_debug, int, 0);
  60MODULE_PARM_DESC(fs_enet_debug,
  61                 "Freescale bitmapped debugging message enable value");
  62
  63#ifdef CONFIG_NET_POLL_CONTROLLER
  64static void fs_enet_netpoll(struct net_device *dev);
  65#endif
  66
  67static void fs_set_multicast_list(struct net_device *dev)
  68{
  69        struct fs_enet_private *fep = netdev_priv(dev);
  70
  71        (*fep->ops->set_multicast_list)(dev);
  72}
  73
  74static void skb_align(struct sk_buff *skb, int align)
  75{
  76        int off = ((unsigned long)skb->data) & (align - 1);
  77
  78        if (off)
  79                skb_reserve(skb, align - off);
  80}
  81
  82/* NAPI receive function */
  83static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
  84{
  85        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
  86        struct net_device *dev = fep->ndev;
  87        const struct fs_platform_info *fpi = fep->fpi;
  88        cbd_t __iomem *bdp;
  89        struct sk_buff *skb, *skbn;
  90        int received = 0;
  91        u16 pkt_len, sc;
  92        int curidx;
  93
  94        if (budget <= 0)
  95                return received;
  96
   97        /*
   98         * Pick up where the last poll left off and walk the RX ring;
   99         * descriptors still marked EMPTY are owned by the controller.
  100         */
 101        bdp = fep->cur_rx;
 102
  103        /* clear RX status bits for NAPI */
 104        (*fep->ops->napi_clear_rx_event)(dev);
 105
 106        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
 107                curidx = bdp - fep->rx_bd_base;
 108
 109                /*
 110                 * Since we have allocated space to hold a complete frame,
 111                 * the last indicator should be set.
 112                 */
 113                if ((sc & BD_ENET_RX_LAST) == 0)
 114                        dev_warn(fep->dev, "rcv is not +last\n");
 115
 116                /*
 117                 * Check for errors.
 118                 */
 119                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
 120                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
 121                        fep->stats.rx_errors++;
 122                        /* Frame too long or too short. */
 123                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
 124                                fep->stats.rx_length_errors++;
 125                        /* Frame alignment */
 126                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
 127                                fep->stats.rx_frame_errors++;
 128                        /* CRC Error */
 129                        if (sc & BD_ENET_RX_CR)
 130                                fep->stats.rx_crc_errors++;
  131                        /* FIFO overrun */
  132                        if (sc & BD_ENET_RX_OV)
  133                                fep->stats.rx_fifo_errors++;
 134
 135                        skb = fep->rx_skbuff[curidx];
 136
 137                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 138                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 139                                DMA_FROM_DEVICE);
 140
 141                        skbn = skb;
 142
 143                } else {
 144                        skb = fep->rx_skbuff[curidx];
 145
 146                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 147                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 148                                DMA_FROM_DEVICE);
 149
 150                        /*
 151                         * Process the incoming frame.
 152                         */
 153                        fep->stats.rx_packets++;
 154                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
 155                        fep->stats.rx_bytes += pkt_len + 4;
 156
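                        /*
                         * Copybreak: small frames are copied into a freshly
                         * allocated skb (reserving 2 bytes so the IP header is
                         * aligned) and the original full-size RX buffer is
                         * recycled straight back into the ring; larger frames
                         * keep the original skb and a new full-size, aligned
                         * buffer is allocated for the descriptor instead.
                         */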
 157                        if (pkt_len <= fpi->rx_copybreak) {
 158                                /* +2 to make IP header L1 cache aligned */
 159                                skbn = netdev_alloc_skb(dev, pkt_len + 2);
 160                                if (skbn != NULL) {
 161                                        skb_reserve(skbn, 2);   /* align IP header */
 162                                        skb_copy_from_linear_data(skb,
 163                                                      skbn->data, pkt_len);
 164                                        swap(skb, skbn);
 165                                }
 166                        } else {
 167                                skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 168
 169                                if (skbn)
 170                                        skb_align(skbn, ENET_RX_ALIGN);
 171                        }
 172
 173                        if (skbn != NULL) {
 174                                skb_put(skb, pkt_len);  /* Make room */
 175                                skb->protocol = eth_type_trans(skb, dev);
 176                                received++;
 177                                netif_receive_skb(skb);
 178                        } else {
 179                                fep->stats.rx_dropped++;
 180                                skbn = skb;
 181                        }
 182                }
 183
 184                fep->rx_skbuff[curidx] = skbn;
 185                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
 186                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 187                             DMA_FROM_DEVICE));
 188                CBDW_DATLEN(bdp, 0);
 189                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
 190
 191                /*
 192                 * Update BD pointer to next entry.
 193                 */
 194                if ((sc & BD_ENET_RX_WRAP) == 0)
 195                        bdp++;
 196                else
 197                        bdp = fep->rx_bd_base;
 198
 199                (*fep->ops->rx_bd_done)(dev);
 200
 201                if (received >= budget)
 202                        break;
 203        }
 204
 205        fep->cur_rx = bdp;
 206
 207        if (received < budget) {
 208                /* done */
 209                napi_complete(napi);
 210                (*fep->ops->napi_enable_rx)(dev);
 211        }
 212        return received;
 213}
 214
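/*
 * NAPI transmit-completion handler: walk the TX ring from dirty_tx and
 * reclaim descriptors the controller has finished with.  Errors and
 * collisions are accounted, DMA mappings are released, the skbs are
 * freed and dirty_tx is advanced.  The queue is woken once at least
 * MAX_SKB_FRAGS descriptors are free again, and the transmitter is
 * restarted after fatal TX errors.  Returns the full budget while work
 * remains so NAPI keeps polling, and 0 (re-enabling TX events) once
 * the ring is clean.
 */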
 215static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
 216{
 217        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
 218                                                   napi_tx);
 219        struct net_device *dev = fep->ndev;
 220        cbd_t __iomem *bdp;
 221        struct sk_buff *skb;
 222        int dirtyidx, do_wake, do_restart;
 223        u16 sc;
 224        int has_tx_work = 0;
 225
 226        spin_lock(&fep->tx_lock);
 227        bdp = fep->dirty_tx;
 228
  229        /* clear TX status bits for NAPI */
 230        (*fep->ops->napi_clear_tx_event)(dev);
 231
 232        do_wake = do_restart = 0;
 233        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
 234                dirtyidx = bdp - fep->tx_bd_base;
 235
 236                if (fep->tx_free == fep->tx_ring)
 237                        break;
 238
 239                skb = fep->tx_skbuff[dirtyidx];
 240
 241                /*
 242                 * Check for errors.
 243                 */
 244                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
 245                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
 246
 247                        if (sc & BD_ENET_TX_HB) /* No heartbeat */
 248                                fep->stats.tx_heartbeat_errors++;
 249                        if (sc & BD_ENET_TX_LC) /* Late collision */
 250                                fep->stats.tx_window_errors++;
 251                        if (sc & BD_ENET_TX_RL) /* Retrans limit */
 252                                fep->stats.tx_aborted_errors++;
 253                        if (sc & BD_ENET_TX_UN) /* Underrun */
 254                                fep->stats.tx_fifo_errors++;
 255                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
 256                                fep->stats.tx_carrier_errors++;
 257
 258                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
 259                                fep->stats.tx_errors++;
 260                                do_restart = 1;
 261                        }
 262                } else
 263                        fep->stats.tx_packets++;
 264
 265                if (sc & BD_ENET_TX_READY) {
 266                        dev_warn(fep->dev,
 267                                 "HEY! Enet xmit interrupt and TX_READY.\n");
 268                }
 269
 270                /*
 271                 * Deferred means some collisions occurred during transmit,
 272                 * but we eventually sent the packet OK.
 273                 */
 274                if (sc & BD_ENET_TX_DEF)
 275                        fep->stats.collisions++;
 276
 277                /* unmap */
 278                if (fep->mapped_as_page[dirtyidx])
 279                        dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
 280                                       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
 281                else
 282                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 283                                         CBDR_DATLEN(bdp), DMA_TO_DEVICE);
 284
 285                /*
 286                 * Free the sk buffer associated with this last transmit.
 287                 */
 288                if (skb) {
 289                        dev_kfree_skb(skb);
 290                        fep->tx_skbuff[dirtyidx] = NULL;
 291                }
 292
 293                /*
 294                 * Update pointer to next buffer descriptor to be transmitted.
 295                 */
 296                if ((sc & BD_ENET_TX_WRAP) == 0)
 297                        bdp++;
 298                else
 299                        bdp = fep->tx_bd_base;
 300
 301                /*
 302                 * Since we have freed up a buffer, the ring is no longer
 303                 * full.
 304                 */
 305                if (++fep->tx_free >= MAX_SKB_FRAGS)
 306                        do_wake = 1;
 307                has_tx_work = 1;
 308        }
 309
 310        fep->dirty_tx = bdp;
 311
 312        if (do_restart)
 313                (*fep->ops->tx_restart)(dev);
 314
 315        if (!has_tx_work) {
 316                napi_complete(napi);
 317                (*fep->ops->napi_enable_tx)(dev);
 318        }
 319
 320        spin_unlock(&fep->tx_lock);
 321
 322        if (do_wake)
 323                netif_wake_queue(dev);
 324
 325        if (has_tx_work)
 326                return budget;
 327        return 0;
 328}
 329
 330/*
 331 * The interrupt handler.
 332 * This is called from the MPC core interrupt.
 333 */
 334static irqreturn_t
 335fs_enet_interrupt(int irq, void *dev_id)
 336{
 337        struct net_device *dev = dev_id;
 338        struct fs_enet_private *fep;
 339        const struct fs_platform_info *fpi;
 340        u32 int_events;
 341        u32 int_clr_events;
 342        int nr, napi_ok;
 343        int handled;
 344
 345        fep = netdev_priv(dev);
 346        fpi = fep->fpi;
 347
 348        nr = 0;
 349        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
 350                nr++;
 351
 352                int_clr_events = int_events;
 353                int_clr_events &= ~fep->ev_napi_rx;
 354
 355                (*fep->ops->clear_int_events)(dev, int_clr_events);
 356
 357                if (int_events & fep->ev_err)
 358                        (*fep->ops->ev_error)(dev, int_events);
 359
 360                if (int_events & fep->ev_rx) {
 361                        napi_ok = napi_schedule_prep(&fep->napi);
 362
 363                        (*fep->ops->napi_disable_rx)(dev);
 364                        (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
 365
 366                        /* NOTE: it is possible for FCCs in NAPI mode    */
 367                        /* to submit a spurious interrupt while in poll  */
 368                        if (napi_ok)
 369                                __napi_schedule(&fep->napi);
 370                }
 371
 372                if (int_events & fep->ev_tx) {
 373                        napi_ok = napi_schedule_prep(&fep->napi_tx);
 374
 375                        (*fep->ops->napi_disable_tx)(dev);
 376                        (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
 377
 378                        /* NOTE: it is possible for FCCs in NAPI mode    */
 379                        /* to submit a spurious interrupt while in poll  */
 380                        if (napi_ok)
 381                                __napi_schedule(&fep->napi_tx);
 382                }
 383        }
 384
 385        handled = nr > 0;
 386        return IRQ_RETVAL(handled);
 387}
 388
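/*
 * (Re)initialize both buffer descriptor rings: allocate, align and
 * DMA-map a receive skb for every RX descriptor (marked EMPTY, with
 * WRAP on the last one) and clear out all TX descriptors.  Any RX
 * descriptor that could not get a buffer is left without the EMPTY
 * bit so the controller will not write into it.
 */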
 389void fs_init_bds(struct net_device *dev)
 390{
 391        struct fs_enet_private *fep = netdev_priv(dev);
 392        cbd_t __iomem *bdp;
 393        struct sk_buff *skb;
 394        int i;
 395
 396        fs_cleanup_bds(dev);
 397
 398        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
 399        fep->tx_free = fep->tx_ring;
 400        fep->cur_rx = fep->rx_bd_base;
 401
 402        /*
 403         * Initialize the receive buffer descriptors.
 404         */
 405        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
 406                skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 407                if (skb == NULL)
 408                        break;
 409
 410                skb_align(skb, ENET_RX_ALIGN);
 411                fep->rx_skbuff[i] = skb;
 412                CBDW_BUFADDR(bdp,
 413                        dma_map_single(fep->dev, skb->data,
 414                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 415                                DMA_FROM_DEVICE));
 416                CBDW_DATLEN(bdp, 0);    /* zero */
 417                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
 418                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
 419        }
 420        /*
  421         * if allocation failed above, fill up the remaining descriptors
 422         */
 423        for (; i < fep->rx_ring; i++, bdp++) {
 424                fep->rx_skbuff[i] = NULL;
 425                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
 426        }
 427
 428        /*
 429         * ...and the same for transmit.
 430         */
 431        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
 432                fep->tx_skbuff[i] = NULL;
 433                CBDW_BUFADDR(bdp, 0);
 434                CBDW_DATLEN(bdp, 0);
 435                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
 436        }
 437}
 438
 439void fs_cleanup_bds(struct net_device *dev)
 440{
 441        struct fs_enet_private *fep = netdev_priv(dev);
 442        struct sk_buff *skb;
 443        cbd_t __iomem *bdp;
 444        int i;
 445
 446        /*
 447         * Reset SKB transmit buffers.
 448         */
 449        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
 450                if ((skb = fep->tx_skbuff[i]) == NULL)
 451                        continue;
 452
 453                /* unmap */
 454                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 455                                skb->len, DMA_TO_DEVICE);
 456
 457                fep->tx_skbuff[i] = NULL;
 458                dev_kfree_skb(skb);
 459        }
 460
 461        /*
 462         * Reset SKB receive buffers
 463         */
 464        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
 465                if ((skb = fep->rx_skbuff[i]) == NULL)
 466                        continue;
 467
 468                /* unmap */
 469                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
 470                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
 471                        DMA_FROM_DEVICE);
 472
 473                fep->rx_skbuff[i] = NULL;
 474
 475                dev_kfree_skb(skb);
 476        }
 477}
 478
 479/**********************************************************************************/
 480
 481#ifdef CONFIG_FS_ENET_MPC5121_FEC
 482/*
  483 * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 484 */
 485static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 486                                               struct sk_buff *skb)
 487{
 488        struct sk_buff *new_skb;
 489
 490        if (skb_linearize(skb))
 491                return NULL;
 492
 493        /* Alloc new skb */
 494        new_skb = netdev_alloc_skb(dev, skb->len + 4);
 495        if (!new_skb)
 496                return NULL;
 497
 498        /* Make sure new skb is properly aligned */
 499        skb_align(new_skb, 4);
 500
 501        /* Copy data to new skb ... */
 502        skb_copy_from_linear_data(skb, new_skb->data, skb->len);
 503        skb_put(new_skb, skb->len);
 504
  505        /* ... and free the old one */
 506        dev_kfree_skb_any(skb);
 507
 508        return new_skb;
 509}
 510#endif
 511
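/*
 * Queue a frame for transmission.  The linear part of the skb is DMA
 * mapped into the current TX descriptor and each page fragment into
 * the following ones; only the final descriptor carries the
 * LAST/INTR/TC bits and the skb pointer, while READY is set on the
 * earlier descriptors as the chain is built.  If fewer than
 * nr_frags + 1 descriptors are free, the queue is stopped and
 * NETDEV_TX_BUSY is returned.
 */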
 512static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 513{
 514        struct fs_enet_private *fep = netdev_priv(dev);
 515        cbd_t __iomem *bdp;
 516        int curidx;
 517        u16 sc;
 518        int nr_frags;
 519        skb_frag_t *frag;
 520        int len;
 521#ifdef CONFIG_FS_ENET_MPC5121_FEC
 522        int is_aligned = 1;
 523        int i;
 524
 525        if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
 526                is_aligned = 0;
 527        } else {
 528                nr_frags = skb_shinfo(skb)->nr_frags;
 529                frag = skb_shinfo(skb)->frags;
 530                for (i = 0; i < nr_frags; i++, frag++) {
 531                        if (!IS_ALIGNED(frag->page_offset, 4)) {
 532                                is_aligned = 0;
 533                                break;
 534                        }
 535                }
 536        }
 537
 538        if (!is_aligned) {
 539                skb = tx_skb_align_workaround(dev, skb);
 540                if (!skb) {
 541                        /*
  542                         * Memory allocation failed in tx_skb_align_workaround(),
  543                         * so the frame could not be realigned. The original skb
  544                         * should still be valid, so let the stack retry it later.
 545                         */
 546                        return NETDEV_TX_BUSY;
 547                }
 548        }
 549#endif
 550
 551        spin_lock(&fep->tx_lock);
 552
 553        /*
 554         * Fill in a Tx ring entry
 555         */
 556        bdp = fep->cur_tx;
 557
 558        nr_frags = skb_shinfo(skb)->nr_frags;
 559        if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
 560                netif_stop_queue(dev);
 561                spin_unlock(&fep->tx_lock);
 562
 563                /*
 564                 * Ooops.  All transmit buffers are full.  Bail out.
 565                 * This should not happen, since the tx queue should be stopped.
 566                 */
 567                dev_warn(fep->dev, "tx queue full!.\n");
 568                return NETDEV_TX_BUSY;
 569        }
 570
 571        curidx = bdp - fep->tx_bd_base;
 572
 573        len = skb->len;
 574        fep->stats.tx_bytes += len;
 575        if (nr_frags)
 576                len -= skb->data_len;
 577        fep->tx_free -= nr_frags + 1;
 578        /*
 579         * Push the data cache so the CPM does not get stale memory data.
 580         */
 581        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
 582                                skb->data, len, DMA_TO_DEVICE));
 583        CBDW_DATLEN(bdp, len);
 584
 585        fep->mapped_as_page[curidx] = 0;
 586        frag = skb_shinfo(skb)->frags;
 587        while (nr_frags) {
 588                CBDC_SC(bdp,
 589                        BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
 590                        BD_ENET_TX_TC);
 591                CBDS_SC(bdp, BD_ENET_TX_READY);
 592
 593                if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
 594                        bdp++, curidx++;
 595                else
 596                        bdp = fep->tx_bd_base, curidx = 0;
 597
 598                len = skb_frag_size(frag);
 599                CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
 600                                                   DMA_TO_DEVICE));
 601                CBDW_DATLEN(bdp, len);
 602
 603                fep->tx_skbuff[curidx] = NULL;
 604                fep->mapped_as_page[curidx] = 1;
 605
 606                frag++;
 607                nr_frags--;
 608        }
 609
 610        /* Trigger transmission start */
 611        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
 612             BD_ENET_TX_LAST | BD_ENET_TX_TC;
 613
  614        /* Note that while the FEC does not implement this bit,
  615         * it is documented as available for software use,
  616         * so reusing it here is safe. */
 617        if (skb->len <= 60)
 618                sc |= BD_ENET_TX_PAD;
 619        CBDC_SC(bdp, BD_ENET_TX_STATS);
 620        CBDS_SC(bdp, sc);
 621
 622        /* Save skb pointer. */
 623        fep->tx_skbuff[curidx] = skb;
 624
 625        /* If this was the last BD in the ring, start at the beginning again. */
 626        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
 627                bdp++;
 628        else
 629                bdp = fep->tx_bd_base;
 630        fep->cur_tx = bdp;
 631
 632        if (fep->tx_free < MAX_SKB_FRAGS)
 633                netif_stop_queue(dev);
 634
 635        skb_tx_timestamp(skb);
 636
 637        (*fep->ops->tx_kickstart)(dev);
 638
 639        spin_unlock(&fep->tx_lock);
 640
 641        return NETDEV_TX_OK;
 642}
 643
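/*
 * ndo_tx_timeout handler: the stack has seen the transmit queue stall
 * for longer than the watchdog timeout, so (if the interface is up)
 * stop and restart the MAC and the PHY, then wake the queue if free
 * descriptors are available again.
 */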
 644static void fs_timeout(struct net_device *dev)
 645{
 646        struct fs_enet_private *fep = netdev_priv(dev);
 647        unsigned long flags;
 648        int wake = 0;
 649
 650        fep->stats.tx_errors++;
 651
 652        spin_lock_irqsave(&fep->lock, flags);
 653
 654        if (dev->flags & IFF_UP) {
 655                phy_stop(dev->phydev);
 656                (*fep->ops->stop)(dev);
 657                (*fep->ops->restart)(dev);
 658                phy_start(dev->phydev);
 659        }
 660
 662        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
 663        spin_unlock_irqrestore(&fep->lock, flags);
 664
 665        if (wake)
 666                netif_wake_queue(dev);
 667}
 668
 669/*-----------------------------------------------------------------------------
 670 *  generic link-change handler - should be sufficient for most cases
 671 *-----------------------------------------------------------------------------*/
  672static void generic_adjust_link(struct net_device *dev)
 673{
 674        struct fs_enet_private *fep = netdev_priv(dev);
 675        struct phy_device *phydev = dev->phydev;
 676        int new_state = 0;
 677
 678        if (phydev->link) {
 679                /* adjust to duplex mode */
 680                if (phydev->duplex != fep->oldduplex) {
 681                        new_state = 1;
 682                        fep->oldduplex = phydev->duplex;
 683                }
 684
 685                if (phydev->speed != fep->oldspeed) {
 686                        new_state = 1;
 687                        fep->oldspeed = phydev->speed;
 688                }
 689
 690                if (!fep->oldlink) {
 691                        new_state = 1;
 692                        fep->oldlink = 1;
 693                }
 694
 695                if (new_state)
 696                        fep->ops->restart(dev);
 697        } else if (fep->oldlink) {
 698                new_state = 1;
 699                fep->oldlink = 0;
 700                fep->oldspeed = 0;
 701                fep->oldduplex = -1;
 702        }
 703
 704        if (new_state && netif_msg_link(fep))
 705                phy_print_status(phydev);
 706}
 707
 708
 709static void fs_adjust_link(struct net_device *dev)
 710{
 711        struct fs_enet_private *fep = netdev_priv(dev);
 712        unsigned long flags;
 713
 714        spin_lock_irqsave(&fep->lock, flags);
 715
  716        if (fep->ops->adjust_link)
 717                fep->ops->adjust_link(dev);
 718        else
 719                generic_adjust_link(dev);
 720
 721        spin_unlock_irqrestore(&fep->lock, flags);
 722}
 723
 724static int fs_init_phy(struct net_device *dev)
 725{
 726        struct fs_enet_private *fep = netdev_priv(dev);
 727        struct phy_device *phydev;
 728        phy_interface_t iface;
 729
 730        fep->oldlink = 0;
 731        fep->oldspeed = 0;
 732        fep->oldduplex = -1;
 733
 734        iface = fep->fpi->use_rmii ?
 735                PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
 736
 737        phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
 738                                iface);
 739        if (!phydev) {
 740                dev_err(&dev->dev, "Could not attach to PHY\n");
 741                return -ENODEV;
 742        }
 743
 744        return 0;
 745}
 746
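/*
 * Bring the interface up: set up the buffer descriptor rings, enable
 * both NAPI contexts, install the MAC interrupt handler, connect and
 * start the PHY, and finally start the transmit queue.  Each failure
 * path unwinds the steps taken so far.
 */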
 747static int fs_enet_open(struct net_device *dev)
 748{
 749        struct fs_enet_private *fep = netdev_priv(dev);
 750        int r;
 751        int err;
 752
  753        /* Initialize the buffer descriptor rings (fep->cur_rx, ...);
  754         * skipping this would cause a crash in fs_enet_rx_napi. */
 755        fs_init_bds(fep->ndev);
 756
 757        napi_enable(&fep->napi);
 758        napi_enable(&fep->napi_tx);
 759
 760        /* Install our interrupt handler. */
 761        r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
 762                        "fs_enet-mac", dev);
 763        if (r != 0) {
  764                dev_err(fep->dev, "Could not allocate FS_ENET IRQ!\n");
 765                napi_disable(&fep->napi);
 766                napi_disable(&fep->napi_tx);
 767                return -EINVAL;
 768        }
 769
 770        err = fs_init_phy(dev);
 771        if (err) {
 772                free_irq(fep->interrupt, dev);
 773                napi_disable(&fep->napi);
 774                napi_disable(&fep->napi_tx);
 775                return err;
 776        }
 777        phy_start(dev->phydev);
 778
 779        netif_start_queue(dev);
 780
 781        return 0;
 782}
 783
 784static int fs_enet_close(struct net_device *dev)
 785{
 786        struct fs_enet_private *fep = netdev_priv(dev);
 787        unsigned long flags;
 788
 789        netif_stop_queue(dev);
 790        netif_carrier_off(dev);
 791        napi_disable(&fep->napi);
 792        napi_disable(&fep->napi_tx);
 793        phy_stop(dev->phydev);
 794
 795        spin_lock_irqsave(&fep->lock, flags);
 796        spin_lock(&fep->tx_lock);
 797        (*fep->ops->stop)(dev);
 798        spin_unlock(&fep->tx_lock);
 799        spin_unlock_irqrestore(&fep->lock, flags);
 800
 801        /* release any irqs */
 802        phy_disconnect(dev->phydev);
 803        free_irq(fep->interrupt, dev);
 804
 805        return 0;
 806}
 807
 808static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
 809{
 810        struct fs_enet_private *fep = netdev_priv(dev);
 811        return &fep->stats;
 812}
 813
 814/*************************************************************************/
 815
 816static void fs_get_drvinfo(struct net_device *dev,
 817                            struct ethtool_drvinfo *info)
 818{
 819        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 820        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 821}
 822
 823static int fs_get_regs_len(struct net_device *dev)
 824{
 825        struct fs_enet_private *fep = netdev_priv(dev);
 826
 827        return (*fep->ops->get_regs_len)(dev);
 828}
 829
 830static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 831                         void *p)
 832{
 833        struct fs_enet_private *fep = netdev_priv(dev);
 834        unsigned long flags;
 835        int r, len;
 836
 837        len = regs->len;
 838
 839        spin_lock_irqsave(&fep->lock, flags);
 840        r = (*fep->ops->get_regs)(dev, p, &len);
 841        spin_unlock_irqrestore(&fep->lock, flags);
 842
 843        if (r == 0)
 844                regs->version = 0;
 845}
 846
 847static int fs_nway_reset(struct net_device *dev)
 848{
 849        return 0;
 850}
 851
 852static u32 fs_get_msglevel(struct net_device *dev)
 853{
 854        struct fs_enet_private *fep = netdev_priv(dev);
 855        return fep->msg_enable;
 856}
 857
 858static void fs_set_msglevel(struct net_device *dev, u32 value)
 859{
 860        struct fs_enet_private *fep = netdev_priv(dev);
 861        fep->msg_enable = value;
 862}
 863
 864static const struct ethtool_ops fs_ethtool_ops = {
 865        .get_drvinfo = fs_get_drvinfo,
 866        .get_regs_len = fs_get_regs_len,
 867        .nway_reset = fs_nway_reset,
 868        .get_link = ethtool_op_get_link,
 869        .get_msglevel = fs_get_msglevel,
 870        .set_msglevel = fs_set_msglevel,
 871        .get_regs = fs_get_regs,
 872        .get_ts_info = ethtool_op_get_ts_info,
 873        .get_link_ksettings = phy_ethtool_get_link_ksettings,
 874        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 875};
 876
 877static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 878{
 879        if (!netif_running(dev))
 880                return -EINVAL;
 881
 882        return phy_mii_ioctl(dev->phydev, rq, cmd);
 883}
 884
 885extern int fs_mii_connect(struct net_device *dev);
 886extern void fs_mii_disconnect(struct net_device *dev);
 887
 888/**************************************************************************************/
 889
 890#ifdef CONFIG_FS_ENET_HAS_FEC
 891#define IS_FEC(match) ((match)->data == &fs_fec_ops)
 892#else
 893#define IS_FEC(match) 0
 894#endif
 895
 896static const struct net_device_ops fs_enet_netdev_ops = {
 897        .ndo_open               = fs_enet_open,
 898        .ndo_stop               = fs_enet_close,
 899        .ndo_get_stats          = fs_enet_get_stats,
 900        .ndo_start_xmit         = fs_enet_start_xmit,
 901        .ndo_tx_timeout         = fs_timeout,
 902        .ndo_set_rx_mode        = fs_set_multicast_list,
 903        .ndo_do_ioctl           = fs_ioctl,
 904        .ndo_validate_addr      = eth_validate_addr,
 905        .ndo_set_mac_address    = eth_mac_addr,
 906        .ndo_change_mtu         = eth_change_mtu,
 907#ifdef CONFIG_NET_POLL_CONTROLLER
 908        .ndo_poll_controller    = fs_enet_netpoll,
 909#endif
 910};
 911
 912static const struct of_device_id fs_enet_match[];
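/*
 * Probe a matched device-tree node: read the CPM command word for
 * CPM-based MACs, set default ring sizes and the copybreak threshold,
 * locate the PHY (via "phy-handle" or a fixed link), optionally enable
 * the "per" clock, allocate the net_device together with the ring
 * bookkeeping arrays, and hand off to the MAC-specific setup_data()
 * and allocate_bd() hooks before registering the netdev.
 */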
 913static int fs_enet_probe(struct platform_device *ofdev)
 914{
 915        const struct of_device_id *match;
 916        struct net_device *ndev;
 917        struct fs_enet_private *fep;
 918        struct fs_platform_info *fpi;
 919        const u32 *data;
 920        struct clk *clk;
 921        int err;
 922        const u8 *mac_addr;
 923        const char *phy_connection_type;
 924        int privsize, len, ret = -ENODEV;
 925
 926        match = of_match_device(fs_enet_match, &ofdev->dev);
 927        if (!match)
 928                return -EINVAL;
 929
 930        fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
 931        if (!fpi)
 932                return -ENOMEM;
 933
 934        if (!IS_FEC(match)) {
 935                data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
 936                if (!data || len != 4)
 937                        goto out_free_fpi;
 938
 939                fpi->cp_command = *data;
 940        }
 941
 942        fpi->rx_ring = 32;
 943        fpi->tx_ring = 64;
 944        fpi->rx_copybreak = 240;
 945        fpi->napi_weight = 17;
 946        fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
 947        if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
 948                err = of_phy_register_fixed_link(ofdev->dev.of_node);
 949                if (err)
 950                        goto out_free_fpi;
 951
 952                /* In the case of a fixed PHY, the DT node associated
 953                 * to the PHY is the Ethernet MAC DT node.
 954                 */
 955                fpi->phy_node = of_node_get(ofdev->dev.of_node);
 956        }
 957
 958        if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
 959                phy_connection_type = of_get_property(ofdev->dev.of_node,
 960                                                "phy-connection-type", NULL);
 961                if (phy_connection_type && !strcmp("rmii", phy_connection_type))
 962                        fpi->use_rmii = 1;
 963        }
 964
  965        /* Make the clock lookup non-fatal (the driver is shared among
  966         * platforms), but require enable to succeed when a clock was
  967         * specified/found, and keep a reference to it on success.
  968         */
 969        clk = devm_clk_get(&ofdev->dev, "per");
 970        if (!IS_ERR(clk)) {
 971                err = clk_prepare_enable(clk);
 972                if (err) {
 973                        ret = err;
 974                        goto out_free_fpi;
 975                }
 976                fpi->clk_per = clk;
 977        }
 978
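        /*
         * The private area is allocated as one block: the fs_enet_private
         * structure itself, followed by the rx_skbuff and tx_skbuff
         * pointer arrays and the per-descriptor mapped_as_page flags.
         * Pointers into this block are carved out below, after
         * setup_data() has run.
         */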
 979        privsize = sizeof(*fep) +
 980                   sizeof(struct sk_buff **) *
 981                     (fpi->rx_ring + fpi->tx_ring) +
 982                   sizeof(char) * fpi->tx_ring;
 983
 984        ndev = alloc_etherdev(privsize);
 985        if (!ndev) {
 986                ret = -ENOMEM;
 987                goto out_put;
 988        }
 989
 990        SET_NETDEV_DEV(ndev, &ofdev->dev);
 991        platform_set_drvdata(ofdev, ndev);
 992
 993        fep = netdev_priv(ndev);
 994        fep->dev = &ofdev->dev;
 995        fep->ndev = ndev;
 996        fep->fpi = fpi;
 997        fep->ops = match->data;
 998
 999        ret = fep->ops->setup_data(ndev);
1000        if (ret)
1001                goto out_free_dev;
1002
1003        fep->rx_skbuff = (struct sk_buff **)&fep[1];
1004        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1005        fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
1006                                       fpi->tx_ring);
1007
1008        spin_lock_init(&fep->lock);
1009        spin_lock_init(&fep->tx_lock);
1010
1011        mac_addr = of_get_mac_address(ofdev->dev.of_node);
1012        if (mac_addr)
1013                memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1014
1015        ret = fep->ops->allocate_bd(ndev);
1016        if (ret)
1017                goto out_cleanup_data;
1018
1019        fep->rx_bd_base = fep->ring_base;
1020        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
1021
1022        fep->tx_ring = fpi->tx_ring;
1023        fep->rx_ring = fpi->rx_ring;
1024
1025        ndev->netdev_ops = &fs_enet_netdev_ops;
1026        ndev->watchdog_timeo = 2 * HZ;
1027        netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
1028        netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
1029
1030        ndev->ethtool_ops = &fs_ethtool_ops;
1031
1032        init_timer(&fep->phy_timer_list);
1033
1034        netif_carrier_off(ndev);
1035
1036        ndev->features |= NETIF_F_SG;
1037
1038        ret = register_netdev(ndev);
1039        if (ret)
1040                goto out_free_bd;
1041
1042        pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
1043
1044        return 0;
1045
1046out_free_bd:
1047        fep->ops->free_bd(ndev);
1048out_cleanup_data:
1049        fep->ops->cleanup_data(ndev);
1050out_free_dev:
1051        free_netdev(ndev);
1052out_put:
1053        of_node_put(fpi->phy_node);
1054        if (fpi->clk_per)
1055                clk_disable_unprepare(fpi->clk_per);
1056out_free_fpi:
1057        kfree(fpi);
1058        return ret;
1059}
1060
1061static int fs_enet_remove(struct platform_device *ofdev)
1062{
1063        struct net_device *ndev = platform_get_drvdata(ofdev);
1064        struct fs_enet_private *fep = netdev_priv(ndev);
1065
1066        unregister_netdev(ndev);
1067
1068        fep->ops->free_bd(ndev);
1069        fep->ops->cleanup_data(ndev);
1070        dev_set_drvdata(fep->dev, NULL);
1071        of_node_put(fep->fpi->phy_node);
1072        if (fep->fpi->clk_per)
1073                clk_disable_unprepare(fep->fpi->clk_per);
1074        free_netdev(ndev);
1075        return 0;
1076}
1077
1078static const struct of_device_id fs_enet_match[] = {
1079#ifdef CONFIG_FS_ENET_HAS_SCC
1080        {
1081                .compatible = "fsl,cpm1-scc-enet",
1082                .data = (void *)&fs_scc_ops,
1083        },
1084        {
1085                .compatible = "fsl,cpm2-scc-enet",
1086                .data = (void *)&fs_scc_ops,
1087        },
1088#endif
1089#ifdef CONFIG_FS_ENET_HAS_FCC
1090        {
1091                .compatible = "fsl,cpm2-fcc-enet",
1092                .data = (void *)&fs_fcc_ops,
1093        },
1094#endif
1095#ifdef CONFIG_FS_ENET_HAS_FEC
1096#ifdef CONFIG_FS_ENET_MPC5121_FEC
1097        {
1098                .compatible = "fsl,mpc5121-fec",
1099                .data = (void *)&fs_fec_ops,
1100        },
1101        {
1102                .compatible = "fsl,mpc5125-fec",
1103                .data = (void *)&fs_fec_ops,
1104        },
1105#else
1106        {
1107                .compatible = "fsl,pq1-fec-enet",
1108                .data = (void *)&fs_fec_ops,
1109        },
1110#endif
1111#endif
1112        {}
1113};
1114MODULE_DEVICE_TABLE(of, fs_enet_match);
1115
1116static struct platform_driver fs_enet_driver = {
1117        .driver = {
1118                .name = "fs_enet",
1119                .of_match_table = fs_enet_match,
1120        },
1121        .probe = fs_enet_probe,
1122        .remove = fs_enet_remove,
1123};
1124
1125#ifdef CONFIG_NET_POLL_CONTROLLER
1126static void fs_enet_netpoll(struct net_device *dev)
1127{
 1128        disable_irq(dev->irq);
 1129        fs_enet_interrupt(dev->irq, dev);
 1130        enable_irq(dev->irq);
1131}
1132#endif
1133
1134module_platform_driver(fs_enet_driver);
1135