linux/drivers/net/fs_enet/fs_enet-main.c
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
        int off = ((unsigned long)skb->data) & (align - 1);

        if (off)
                skb_reserve(skb, align - off);
}
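
/*
 * skb_align() advances skb->data to the next @align boundary; e.g. with
 * align == 16 and data ending in 0x0a, off == 10 and skb_reserve() skips
 * 6 bytes.  The controller DMAs straight into the buffer, so receive
 * skbs must satisfy the MAC's alignment requirement (ENET_RX_ALIGN).
 */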

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
        struct net_device *dev = fep->ndev;
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        (*fep->ops->napi_clear_rx_event)(dev);

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_fifo_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {
                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* set frame length */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);

                if (received >= budget)
                        break;
        }

        fep->cur_rx = bdp;

        if (received < budget) {
                /* done */
                napi_complete(napi);
                (*fep->ops->napi_enable_rx)(dev);
        }
        return received;
}
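
/*
 * NAPI contract: the poll callback consumes at most @budget frames and
 * returns the number actually processed.  Only when it finishes below
 * budget may it call napi_complete() and re-enable the RX event; if it
 * returns budget, the core keeps polling.
 */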

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_fifo_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {
                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* set frame length */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_rx(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        return 0;
}
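
/*
 * The non-NAPI path runs in hard-IRQ context, so frames are handed to
 * the stack with netif_rx() (which defers to softirq) rather than
 * netif_receive_skb(); otherwise it mirrors fs_enet_rx_napi() above.
 */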

static void fs_enet_tx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake, do_restart;
        u16 sc;

        spin_lock(&fep->tx_lock);
        bdp = fep->dirty_tx;

        do_wake = do_restart = 0;
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

                        if (sc & BD_ENET_TX_HB) /* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC) /* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL) /* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN) /* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
                                fep->stats.tx_carrier_errors++;

                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                fep->stats.tx_errors++;
                                do_restart = 1;
                        }
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        if (do_restart)
                (*fep->ops->tx_restart)(dev);

        spin_unlock(&fep->tx_lock);

        if (do_wake)
                netif_wake_queue(dev);
}
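
/*
 * TX completion runs in hard-IRQ context under fep->tx_lock, which is
 * why freed skbs go through dev_kfree_skb_irq().  The queue is woken
 * only after the lock is dropped, once at least one descriptor was
 * reclaimed (do_wake).
 */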

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        u32 int_events;
        u32 int_clr_events;
        int nr, napi_ok;
        int handled;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        nr = 0;
        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
                nr++;

                int_clr_events = int_events;
                if (fpi->use_napi)
                        int_clr_events &= ~fep->ev_napi_rx;

                (*fep->ops->clear_int_events)(dev, int_clr_events);

                if (int_events & fep->ev_err)
                        (*fep->ops->ev_error)(dev, int_events);

                if (int_events & fep->ev_rx) {
                        if (!fpi->use_napi)
                                fs_enet_rx_non_napi(dev);
                        else {
                                napi_ok = napi_schedule_prep(&fep->napi);

                                (*fep->ops->napi_disable_rx)(dev);
                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                                /* NOTE: it is possible for FCCs in NAPI mode */
                                /* to submit a spurious interrupt while in poll */
                                if (napi_ok)
                                        __napi_schedule(&fep->napi);
                        }
                }

                if (int_events & fep->ev_tx)
                        fs_enet_tx(dev);
        }

        handled = nr > 0;
        return IRQ_RETVAL(handled);
}
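
/*
 * The handler loops until get_int_events() reports nothing pending and
 * returns IRQ_HANDLED only if at least one pass did work, keeping
 * shared-interrupt accounting honest.  In NAPI mode RX events are
 * masked (napi_disable_rx) and acknowledged here before the poll
 * routine is scheduled; napi_schedule_prep() prevents re-scheduling a
 * poll that is already in flight.
 */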

void fs_init_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int i;

        fs_cleanup_bds(dev);

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        break;
                }
                skb_align(skb, ENET_RX_ALIGN);
                fep->rx_skbuff[i] = skb;
                CBDW_BUFADDR(bdp,
                        dma_map_single(fep->dev, skb->data,
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);    /* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * If allocation failed, fill up the remainder of the ring with
         * descriptors that carry no buffer and no EMPTY bit, so the
         * controller will not use them.
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * ...and the same for transmit.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, 0);
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }
}
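
/*
 * BD_SC_WRAP on the last descriptor of each ring makes the controller
 * wrap back to the ring base, so the rx_ring/tx_ring entries behave as
 * circular buffers; the driver mirrors this in its own descriptor
 * walking (the BD_ENET_*_WRAP checks above).
 */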

void fs_cleanup_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct sk_buff *skb;
        cbd_t __iomem *bdp;
        int i;

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                        DMA_FROM_DEVICE);

                fep->rx_skbuff[i] = NULL;

                dev_kfree_skb(skb);
        }
}

/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        int curidx;
        u16 sc;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return NETDEV_TX_BUSY;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                                skb->data, skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
             BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /*
         * The FEC does not implement the TX_PAD bit, but it documents
         * the bit position as available for software use, so setting it
         * there is harmless.
         */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
        CBDS_SC(bdp, sc);

        (*fep->ops->tx_kickstart)(dev);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return NETDEV_TX_OK;
}
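
/*
 * Descriptor handoff ordering: the buffer address and length are
 * written first, and BD_ENET_TX_READY is set last (in CBDS_SC).  Once
 * READY is set the descriptor is owned by the controller and must not
 * be touched until completion clears the bit.
 */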

static void fs_timeout(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int wake = 0;

        fep->stats.tx_errors++;

        spin_lock_irqsave(&fep->lock, flags);

        if (dev->flags & IFF_UP) {
                phy_stop(fep->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
                phy_start(fep->phydev);
        }

        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (wake)
                netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev = fep->phydev;
        int new_state = 0;

        if (phydev->link) {
                /* adjust to duplex mode */
                if (phydev->duplex != fep->oldduplex) {
                        new_state = 1;
                        fep->oldduplex = phydev->duplex;
                }

                if (phydev->speed != fep->oldspeed) {
                        new_state = 1;
                        fep->oldspeed = phydev->speed;
                }

                if (!fep->oldlink) {
                        new_state = 1;
                        fep->oldlink = 1;
                }

                if (new_state)
                        fep->ops->restart(dev);
        } else if (fep->oldlink) {
                new_state = 1;
                fep->oldlink = 0;
                fep->oldspeed = 0;
                fep->oldduplex = -1;
        }

        if (new_state && netif_msg_link(fep))
                phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&fep->lock, flags);

        if (fep->ops->adjust_link)
                fep->ops->adjust_link(dev);
        else
                generic_adjust_link(dev);

        spin_unlock_irqrestore(&fep->lock, flags);
}
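
/*
 * fs_adjust_link() is the link-change callback registered with phylib
 * in fs_init_phy() below; it serializes with the rest of the driver
 * via fep->lock and prefers a MAC-specific adjust_link hook, falling
 * back to the generic handler above.
 */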

static int fs_init_phy(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev;

        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;

        phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);
        if (!phydev) {
                phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
                                                   PHY_INTERFACE_MODE_MII);
        }
        if (!phydev) {
                dev_err(&dev->dev, "Could not attach to PHY\n");
                return -ENODEV;
        }

        fep->phydev = phydev;

        return 0;
}
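
/*
 * If no PHY can be attached through "phy-handle", the driver falls
 * back to a device-tree "fixed-link" description, which covers
 * MAC-to-MAC links that have no real PHY behind them.
 */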

static int fs_enet_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        int r;
        int err;

        /* Initialize fep->cur_rx and friends; skipping this would
         * crash fs_enet_rx_napi on the first receive. */
        fs_init_bds(fep->ndev);

        if (fep->fpi->use_napi)
                napi_enable(&fep->napi);

        /* Install our interrupt handler. */
        r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
                        "fs_enet-mac", dev);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FS_ENET IRQ!\n", dev->name);
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return -EINVAL;
        }

        err = fs_init_phy(dev);
        if (err) {
                free_irq(fep->interrupt, dev);
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return err;
        }
        phy_start(fep->phydev);

        netif_start_queue(dev);

        return 0;
}

static int fs_enet_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);
        if (fep->fpi->use_napi)
                napi_disable(&fep->napi);
        phy_stop(fep->phydev);

        spin_lock_irqsave(&fep->lock, flags);
        spin_lock(&fep->tx_lock);
        (*fep->ops->stop)(dev);
        spin_unlock(&fep->tx_lock);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        phy_disconnect(fep->phydev);
        fep->phydev = NULL;
        free_irq(fep->interrupt, dev);

        return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                         void *p)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int r, len;

        len = regs->len;

        spin_lock_irqsave(&fep->lock, flags);
        r = (*fep->ops->get_regs)(dev, p, &len);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (r == 0)
                regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (!fep->phydev)
                return -ENODEV;

        return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (!fep->phydev)
                return -ENODEV;

        return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
        return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo = fs_get_drvinfo,
        .get_regs_len = fs_get_regs_len,
        .get_settings = fs_get_settings,
        .set_settings = fs_set_settings,
        .nway_reset = fs_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .set_tx_csum = ethtool_op_set_tx_csum,  /* local! */
        .set_sg = ethtool_op_set_sg,
        .get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = if_mii(rq);

        if (!netif_running(dev))
                return -EINVAL;

        return phy_mii_ioctl(fep->phydev, mii, cmd);
}
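
/*
 * The MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are forwarded
 * to phylib, which performs the MDIO accesses via the attached PHY.
 */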

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
        .ndo_open               = fs_enet_open,
        .ndo_stop               = fs_enet_close,
        .ndo_get_stats          = fs_enet_get_stats,
        .ndo_start_xmit         = fs_enet_start_xmit,
        .ndo_tx_timeout         = fs_timeout,
        .ndo_set_multicast_list = fs_set_multicast_list,
        .ndo_do_ioctl           = fs_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = fs_enet_netpoll,
#endif
};

static int __devinit fs_enet_probe(struct of_device *ofdev,
                                   const struct of_device_id *match)
{
        struct net_device *ndev;
        struct fs_enet_private *fep;
        struct fs_platform_info *fpi;
        const u32 *data;
        const u8 *mac_addr;
        int privsize, len, ret = -ENODEV;

        fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
        if (!fpi)
                return -ENOMEM;

        if (!IS_FEC(match)) {
                data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
                if (!data || len != 4)
                        goto out_free_fpi;

                fpi->cp_command = *data;
        }

        fpi->rx_ring = 32;
        fpi->tx_ring = 32;
        fpi->rx_copybreak = 240;
        fpi->use_napi = 1;
        fpi->napi_weight = 17;
        fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
        if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link",
                                                  NULL)))
                goto out_free_fpi;

        privsize = sizeof(*fep) +
                   sizeof(struct sk_buff **) *
                   (fpi->rx_ring + fpi->tx_ring);

        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                ret = -ENOMEM;
                goto out_free_fpi;
        }

        SET_NETDEV_DEV(ndev, &ofdev->dev);
        dev_set_drvdata(&ofdev->dev, ndev);

        fep = netdev_priv(ndev);
        fep->dev = &ofdev->dev;
        fep->ndev = ndev;
        fep->fpi = fpi;
        fep->ops = match->data;

        ret = fep->ops->setup_data(ndev);
        if (ret)
                goto out_free_dev;

        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
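
        /*
         * privsize reserved room for rx_ring + tx_ring skb pointers
         * directly behind the private struct, so &fep[1] is the start
         * of that tail region and both arrays are carved out of the
         * single alloc_etherdev() allocation.
         */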

        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        mac_addr = of_get_mac_address(ofdev->node);
        if (mac_addr)
                memcpy(ndev->dev_addr, mac_addr, 6);

        ret = fep->ops->allocate_bd(ndev);
        if (ret)
                goto out_cleanup_data;

        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        ndev->netdev_ops = &fs_enet_netdev_ops;
        ndev->watchdog_timeo = 2 * HZ;
        if (fpi->use_napi)
                netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
                               fpi->napi_weight);

        ndev->ethtool_ops = &fs_ethtool_ops;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        ret = register_netdev(ndev);
        if (ret)
                goto out_free_bd;

        printk(KERN_INFO "%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

        return 0;

out_free_bd:
        fep->ops->free_bd(ndev);
out_cleanup_data:
        fep->ops->cleanup_data(ndev);
out_free_dev:
        free_netdev(ndev);
        dev_set_drvdata(&ofdev->dev, NULL);
        of_node_put(fpi->phy_node);
out_free_fpi:
        kfree(fpi);
        return ret;
}

static int fs_enet_remove(struct of_device *ofdev)
{
        struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
        struct fs_enet_private *fep = netdev_priv(ndev);

        unregister_netdev(ndev);

        fep->ops->free_bd(ndev);
        fep->ops->cleanup_data(ndev);
        dev_set_drvdata(fep->dev, NULL);
        of_node_put(fep->fpi->phy_node);
        free_netdev(ndev);
        return 0;
}

static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
        {
                .compatible = "fsl,cpm1-scc-enet",
                .data = (void *)&fs_scc_ops,
        },
        {
                .compatible = "fsl,cpm2-scc-enet",
                .data = (void *)&fs_scc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
        {
                .compatible = "fsl,cpm2-fcc-enet",
                .data = (void *)&fs_fcc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
        {
                .compatible = "fsl,pq1-fec-enet",
                .data = (void *)&fs_fec_ops,
        },
#endif
        {}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
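
/*
 * MODULE_DEVICE_TABLE exports the match table as module aliases, so
 * udev/modprobe can autoload this driver when a matching device-tree
 * node is present.
 */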

static struct of_platform_driver fs_enet_driver = {
        .name   = "fs_enet",
        .match_table = fs_enet_match,
        .probe = fs_enet_probe,
        .remove = fs_enet_remove,
};

static int __init fs_init(void)
{
        return of_register_platform_driver(&fs_enet_driver);
}

static void __exit fs_cleanup(void)
{
        of_unregister_platform_driver(&fs_enet_driver);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        fs_enet_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);