/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
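/*
 * Illustrative sketch (not part of the driver): advancing one position
 * through a descriptor ring of the kind described above.  The hardware
 * wraps when it sees the WRAP bit in the last descriptor; the software
 * side wraps with pointer arithmetic against the ring base, as
 * skip_txbd() does later in this file:
 *
 *         struct txbd8 *next = bdp + 1;
 *         if (next >= base + ring_size)   // walked off the end...
 *                 next -= ring_size;      // ...so wrap to the start
 */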

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
                struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
                const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                              int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return priv->vlgrp || priv->rx_csum_enable;
}

static int gfar_of_init(struct net_device *dev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        u64 addr, size;
        int err = 0;
        struct gfar_private *priv = netdev_priv(dev);
        struct device_node *np = priv->node;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;

        if (!np || !of_device_is_available(np))
                return -ENODEV;

        /* get a pointer to the register memory */
        addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
        priv->regs = ioremap(addr, size);

        if (priv->regs == NULL)
                return -ENOMEM;

        priv->interruptTransmit = irq_of_parse_and_map(np, 0);

        model = of_get_property(np, "model", NULL);

        /* If we aren't the FEC, we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                priv->interruptReceive = irq_of_parse_and_map(np, 1);

                priv->interruptError = irq_of_parse_and_map(np, 2);

                if (priv->interruptTransmit < 0 ||
                                priv->interruptReceive < 0 ||
                                priv->interruptError < 0) {
                        err = -EINVAL;
                        goto err_out;
                }
        }

        stash = of_get_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        stash_len = of_get_property(np, "rx-stash-len", NULL);

        if (stash_len)
                priv->rx_stash_size = *stash_len;

        stash_idx = of_get_property(np, "rx-stash-idx", NULL);

        if (stash_idx)
                priv->rx_stash_index = *stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR;
        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                        FSL_GIANFAR_DEV_HAS_PADDING |
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                        FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

        ctype = of_get_property(np, "phy-connection-type", NULL);

        /* We only care about rgmii-id.  The rest are autodetected */
        if (ctype && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_get_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

err_out:
        iounmap(priv->regs);
        return err;
}
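
/*
 * For reference, a hypothetical device tree node carrying the properties
 * gfar_of_init() looks up.  The property names are the ones read above;
 * the values (unit address, reg, interrupts, MAC, stash sizes) are
 * illustrative only, not taken from any real board file:
 *
 *         ethernet@24000 {
 *                 model = "eTSEC";
 *                 reg = <0x24000 0x1000>;
 *                 interrupts = <29 2 30 2 34 2>;
 *                 local-mac-address = [ 00 04 9f 00 01 02 ];
 *                 phy-connection-type = "rgmii-id";
 *                 phy-handle = <&phy0>;
 *                 tbi-handle = <&tbi0>;
 *                 bd-stash;
 *                 rx-stash-len = <96>;
 *                 rx-stash-idx = <0>;
 *                 fsl,magic-packet;
 *         };
 */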

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
                const struct of_device_id *match)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        int err = 0;
        int len_devname;

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
        priv->node = ofdev->node;
        SET_NETDEV_DEV(dev, &ofdev->dev);

        err = gfar_of_init(dev);

        if (err)
                goto regs_fail;

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);
        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);

        dev_set_drvdata(&ofdev->dev, priv);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        gfar_halt(dev);

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        /* We need to delay at least 3 TX clocks */
        udelay(2);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_NETDEV_DEV(dev, &ofdev->dev);

        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
        dev->mtu = 1500;

        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
        priv->num_txbdfree = DEFAULT_TX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txic = DEFAULT_TXIC;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxic = DEFAULT_RXIC;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        device_init_wakeup(&dev->dev,
                priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        /* fill out IRQ number and name fields */
        len_devname = strlen(dev->name);
        strncpy(&priv->int_name_tx[0], dev->name, len_devname);
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                strncpy(&priv->int_name_tx[len_devname],
                        "_tx", sizeof("_tx") + 1);

                strncpy(&priv->int_name_rx[0], dev->name, len_devname);
                strncpy(&priv->int_name_rx[len_devname],
                        "_rx", sizeof("_rx") + 1);

                strncpy(&priv->int_name_er[0], dev->name, len_devname);
                strncpy(&priv->int_name_er[len_devname],
                        "_er", sizeof("_er") + 1);
        } else
                priv->int_name_tx[len_devname] = '\0';

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct of_device *ofdev)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(priv->ndev);
        iounmap(priv->regs);
        free_netdev(priv->ndev);

        return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
        struct net_device *dev = priv->ndev;
        unsigned long flags;
        u32 tempval;

        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        netif_device_detach(dev);

        if (netif_running(dev)) {
                spin_lock_irqsave(&priv->txlock, flags);
                spin_lock(&priv->rxlock);

                gfar_halt_nodisable(dev);

                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
                tempval = gfar_read(&priv->regs->maccfg1);

                tempval &= ~MACCFG1_TX_EN;

                if (!magic_packet)
                        tempval &= ~MACCFG1_RX_EN;

                gfar_write(&priv->regs->maccfg1, tempval);

                spin_unlock(&priv->rxlock);
                spin_unlock_irqrestore(&priv->txlock, flags);

                napi_disable(&priv->napi);

                if (magic_packet) {
                        /* Enable interrupt on Magic Packet */
                        gfar_write(&priv->regs->imask, IMASK_MAG);

                        /* Enable Magic Packet mode */
                        tempval = gfar_read(&priv->regs->maccfg2);
                        tempval |= MACCFG2_MPEN;
                        gfar_write(&priv->regs->maccfg2, tempval);
                } else {
                        phy_stop(priv->phydev);
                }
        }

        return 0;
}

static int gfar_resume(struct of_device *ofdev)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
        struct net_device *dev = priv->ndev;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        if (!netif_running(dev)) {
                netif_device_attach(dev);
                return 0;
        }

        if (!magic_packet && priv->phydev)
                phy_start(priv->phydev);

        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */

        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        tempval = gfar_read(&priv->regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
        gfar_write(&priv->regs->maccfg2, tempval);

        gfar_start(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        netif_device_attach(dev);

        napi_enable(&priv->napi);

        return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        u32 ecntrl = gfar_read(&priv->regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
                        phy_interface_t interface = priv->interface;

                        /*
                         * This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        interface = gfar_get_interface(dev);

        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                      interface);
        if (!priv->phydev)
                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
                                                         interface);
        if (!priv->phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        /* Remove any features not supported by the controller */
        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        priv->phydev->advertising = priv->phydev->supported;

        return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *tbiphy;

        if (!priv->tbi_node) {
                dev_warn(&dev->dev, "error: SGMII mode requires that the "
                                    "device tree specify a tbi-handle\n");
                return;
        }

        tbiphy = of_phy_find_device(priv->tbi_node);
        if (!tbiphy) {
                dev_err(&dev->dev, "error: Could not get TBI device\n");
                return;
        }

        /*
         * If the link is already up, we must already be ok, and don't need to
         * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
         * everything for us?  Resetting it takes the link down and requires
         * several seconds for it to come back.
         */
        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
                return;

        /* Single clk mode, MII mode off (for SerDes communication) */
        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

        phy_write(tbiphy, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);

        phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}


/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        gfar_halt_nodisable(dev);

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

        dma_free_coherent(&priv->ofdev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i, j;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {
                if (!priv->tx_skbuff[i])
                        continue;

                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
                                txbdp->length, DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
                        txbdp++;
                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
                                        txbdp->length, DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(priv->tx_skbuff[i]);
                priv->tx_skbuff[i] = NULL;
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if (priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->lstatus = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}

void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);

        dev->trans_start = jiffies;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr = 0;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 tctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                                        dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->num_txbdfree = priv->tx_ring_size;
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->lstatus = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb;

                skb = gfar_new_skb(dev);

                if (!skb) {
                        printk(KERN_ERR "%s: Can't allocate RX buffers\n",
                                        dev->name);

                        goto err_rxalloc_fail;
                }

                priv->rx_skbuff[i] = skb;

                gfar_new_rxbdp(dev, rxbdp, skb);

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, priv->int_name_er, dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, priv->int_name_tx, dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, priv->int_name_rx, dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                                dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, priv->int_name_tx, dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
        gfar_write(&regs->txic, 0);
        if (priv->txcoalescing)
                gfar_write(&regs->txic, priv->txic);

        gfar_write(&regs->rxic, 0);
        if (priv->rxcoalescing)
                gfar_write(&regs->rxic, priv->rxic);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* keep vlan related bits if it's enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
                tctrl |= TCTRL_VLINS;
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        gfar_write(&priv->regs->tctrl, tctrl);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(&priv->ofdev->dev,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err;

        napi_enable(&priv->napi);

        skb_queue_head_init(&priv->rx_recycle);

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err) {
                napi_disable(&priv->napi);
                return err;
        }

        err = startup_gfar(dev);
        if (err) {
                napi_disable(&priv->napi);
                return err;
        }

        netif_start_queue(dev);

        device_set_wakeup_enable(&dev->dev, priv->wol_en);

        return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = udp_hdr(skb)->check;
        } else
                fcb->phcs = tcp_hdr(skb)->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}
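
/*
 * Worked example for the offsets above (assuming GMAC_FCB_LEN is 8 and
 * an untagged IPv4 frame with no IP options): gfar_add_fcb() has
 * already pushed the FCB, so skb_network_offset() sees
 * ETH_HLEN + GMAC_FCB_LEN = 14 + 8 = 22, giving l3os = 22 - 8 = 14,
 * while l4os = 20, the length of the IPv4 header.
 */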

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
                               struct txbd8 *base, int ring_size)
{
        struct txbd8 *new_bd = bdp + stride;

        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}
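
/*
 * Example: with ring_size 64 and bdp at base + 63, skip_txbd(bdp, 1,
 * base, 64) computes base + 64, detects that it is past the end of the
 * ring, and returns base, wrapping back to the first descriptor.
 */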

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
                int ring_size)
{
        return skip_txbd(bdp, 1, base, ring_size);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp, *txbdp_start, *base;
        u32 lstatus;
        int i;
        u32 bufaddr;
        unsigned long flags;
        unsigned int nr_frags, length;

        base = priv->tx_bd_base;

        /* make space for additional header when fcb is needed */
        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
                        (priv->vlgrp && vlan_tx_tag_present(skb))) &&
                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
                struct sk_buff *skb_new;

                skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
                if (!skb_new) {
                        dev->stats.tx_errors++;
                        kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
                kfree_skb(skb);
                skb = skb_new;
        }

        /* total number of fragments in the SKB */
        nr_frags = skb_shinfo(skb)->nr_frags;

        spin_lock_irqsave(&priv->txlock, flags);

        /* check if there is space to queue this packet */
        if ((nr_frags+1) > priv->num_txbdfree) {
                /* no space, stop the queue */
                netif_stop_queue(dev);
                dev->stats.tx_fifo_errors++;
                spin_unlock_irqrestore(&priv->txlock, flags);
                return NETDEV_TX_BUSY;
        }

        /* Update transmit stats */
        dev->stats.tx_bytes += skb->len;

        txbdp = txbdp_start = priv->cur_tx;

        if (nr_frags == 0) {
                lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                /* Place the fragment addresses and lengths into the TxBDs */
                for (i = 0; i < nr_frags; i++) {
                        /* Point at the next BD, wrapping as needed */
                        txbdp = next_txbd(txbdp, base, priv->tx_ring_size);

                        length = skb_shinfo(skb)->frags[i].size;

                        lstatus = txbdp->lstatus | length |
                                BD_LFLAG(TXBD_READY);

                        /* Handle the last BD specially */
                        if (i == nr_frags - 1)
                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

                        bufaddr = dma_map_page(&priv->ofdev->dev,
                                        skb_shinfo(skb)->frags[i].page,
                                        skb_shinfo(skb)->frags[i].page_offset,
                                        length,
                                        DMA_TO_DEVICE);

                        /* set the TxBD length and buffer pointer */
                        txbdp->bufPtr = bufaddr;
                        txbdp->lstatus = lstatus;
                }

                lstatus = txbdp_start->lstatus;
        }

        /* Set up checksumming */
        if (CHECKSUM_PARTIAL == skb->ip_summed) {
                fcb = gfar_add_fcb(skb);
                lstatus |= BD_LFLAG(TXBD_TOE);
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlgrp && vlan_tx_tag_present(skb)) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb);
                        lstatus |= BD_LFLAG(TXBD_TOE);
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* setup the TxBD length and buffer pointer for the first BD */
        priv->tx_skbuff[priv->skb_curtx] = skb;
        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);

        lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

        /*
         * The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction.  At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */
        eieio();

        txbdp_start->lstatus = lstatus;

        /* Update the current skb pointer to the next entry we will use
         * (wrapping if necessary) */
        priv->skb_curtx = (priv->skb_curtx + 1) &
                TX_RING_MOD_MASK(priv->tx_ring_size);
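        /* Note: the masked increment above assumes a power-of-two ring
         * size, with TX_RING_MOD_MASK() presumably (tx_ring_size - 1). */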

        priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);

        /* reduce TxBD free count */
        priv->num_txbdfree -= (nr_frags + 1);

        dev->trans_start = jiffies;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (!priv->num_txbdfree) {
                netif_stop_queue(dev);

                dev->stats.tx_fifo_errors++;
        }

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        napi_disable(&priv->napi);

        skb_queue_purge(&priv->rx_recycle);
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                /* If parse is no longer required, then disable parser */
                if (tempval & RCTRL_REQ_PARSER)
                        tempval |= RCTRL_PRSDEP_INIT;
                else
                        tempval &= ~RCTRL_PRSDEP_INIT;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        gfar_change_mtu(dev, dev->mtu);

        spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlgrp)
                frame_size += VLAN_HLEN;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                                        dev->name);
                return -EINVAL;
        }

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;
1511
1512        /* Only stop and restart the controller if it is currently
1513         * running and the buffer size actually changed */
1514        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1515                stop_gfar(dev);
1516
1517        priv->rx_buffer_size = tempsize;
1518
1519        dev->mtu = new_mtu;
1520
1521        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1522        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1523
1524        /* If the MTU is larger than the max size for standard
1525         * ethernet frames (i.e., a jumbo frame), then set maccfg2
1526         * to allow huge frames, and to check the length */
1527        tempval = gfar_read(&priv->regs->maccfg2);
1528
1529        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1530                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1531        else
1532                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1533
1534        gfar_write(&priv->regs->maccfg2, tempval);
1535
1536        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1537                startup_gfar(dev);
1538
1539        return 0;
1540}
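/*
 * Aside: the tempsize computation above rounds frame_size down to a
 * multiple of INCREMENTAL_BUFFER_SIZE (a power of two) and then adds
 * one full increment, so the result always exceeds the rounded-down
 * value -- even when frame_size is already an exact multiple.  A
 * standalone sketch, assuming a 512-byte increment (illustrative only):
 */
#if 0
#include <assert.h>

#define INC 512 /* stand-in for INCREMENTAL_BUFFER_SIZE */

static int round_up_buffer(int frame_size)
{
        return (frame_size & ~(INC - 1)) + INC;
}

static void round_up_examples(void)
{
        assert(round_up_buffer(1500) == 1536); /* 1024 + 512 */
        assert(round_up_buffer(1536) == 2048); /* exact multiple still grows */
}
#endif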
1541
1542/* gfar_reset_task gets scheduled when a packet has not been
1543 * transmitted after a set amount of time.
1544 * For now, assume that clearing out all the structures, and
1545 * starting over will fix the problem.
1546 */
1547static void gfar_reset_task(struct work_struct *work)
1548{
1549        struct gfar_private *priv = container_of(work, struct gfar_private,
1550                        reset_task);
1551        struct net_device *dev = priv->ndev;
1552
1553        if (dev->flags & IFF_UP) {
1554                netif_stop_queue(dev);
1555                stop_gfar(dev);
1556                startup_gfar(dev);
1557                netif_start_queue(dev);
1558        }
1559
1560        netif_tx_schedule_all(dev);
1561}
1562
1563static void gfar_timeout(struct net_device *dev)
1564{
1565        struct gfar_private *priv = netdev_priv(dev);
1566
1567        dev->stats.tx_errors++;
1568        schedule_work(&priv->reset_task);
1569}
1570
1571/* Reclaims buffers from the TX ring once the controller has sent them */
1572static int gfar_clean_tx_ring(struct net_device *dev)
1573{
1574        struct gfar_private *priv = netdev_priv(dev);
1575        struct txbd8 *bdp;
1576        struct txbd8 *lbdp = NULL;
1577        struct txbd8 *base = priv->tx_bd_base;
1578        struct sk_buff *skb;
1579        int skb_dirtytx;
1580        int tx_ring_size = priv->tx_ring_size;
1581        int frags = 0;
1582        int i;
1583        int howmany = 0;
1584        u32 lstatus;
1585
1586        bdp = priv->dirty_tx;
1587        skb_dirtytx = priv->skb_dirtytx;
1588
1589        while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1590                frags = skb_shinfo(skb)->nr_frags;
1591                lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1592
1593                lstatus = lbdp->lstatus;
1594
1595                /* Only clean completed frames */
1596                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
1597                                (lstatus & BD_LENGTH_MASK))
1598                        break;
1599
1600                dma_unmap_single(&priv->ofdev->dev,
1601                                bdp->bufPtr,
1602                                bdp->length,
1603                                DMA_TO_DEVICE);
1604
1605                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1606                bdp = next_txbd(bdp, base, tx_ring_size);
1607
1608                for (i = 0; i < frags; i++) {
1609                        dma_unmap_page(&priv->ofdev->dev,
1610                                        bdp->bufPtr,
1611                                        bdp->length,
1612                                        DMA_TO_DEVICE);
1613                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1614                        bdp = next_txbd(bdp, base, tx_ring_size);
1615                }
1616
1617                /*
1618                 * If there's room in the queue (limit it to rx_ring_size)
1619                 * we add this skb back into the pool, if it's the right size
1620                 */
1621                if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
1622                                skb_recycle_check(skb, priv->rx_buffer_size +
1623                                        RXBUF_ALIGNMENT))
1624                        __skb_queue_head(&priv->rx_recycle, skb);
1625                else
1626                        dev_kfree_skb_any(skb);
1627
1628                priv->tx_skbuff[skb_dirtytx] = NULL;
1629
1630                skb_dirtytx = (skb_dirtytx + 1) &
1631                        TX_RING_MOD_MASK(tx_ring_size);
1632
1633                howmany++;
1634                priv->num_txbdfree += frags + 1;
1635        }
1636
1637        /* If we freed a buffer, we can restart transmission, if necessary */
1638        if (netif_queue_stopped(dev) && priv->num_txbdfree)
1639                netif_wake_queue(dev);
1640
1641        /* Update dirty indicators */
1642        priv->skb_dirtytx = skb_dirtytx;
1643        priv->dirty_tx = bdp;
1644
1645        dev->stats.tx_packets += howmany;
1646
1647        return howmany;
1648}
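/*
 * Aside: the xmit and clean paths cooperate on the TX ring through the
 * free-descriptor count and masked index arithmetic (the ring size is
 * a power of two, so "advance" is an AND instead of a modulo).  A
 * minimal user-space sketch of that accounting (names illustrative,
 * not part of this driver):
 */
#if 0
#define RING_SIZE 64 /* must be a power of two */
#define RING_MASK(x) ((x) & (RING_SIZE - 1))

struct ring {
        int head;       /* next descriptor the producer will fill */
        int tail;       /* next descriptor the consumer will reclaim */
        int free;       /* descriptors available to the producer */
};

static int ring_produce(struct ring *r, int ndesc)
{
        if (r->free < ndesc)
                return -1;              /* caller must stop the queue */
        r->head = RING_MASK(r->head + ndesc);
        r->free -= ndesc;
        return 0;
}

static void ring_consume(struct ring *r, int ndesc)
{
        r->tail = RING_MASK(r->tail + ndesc);
        r->free += ndesc;               /* caller may wake the queue */
}
#endif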
1649
1650static void gfar_schedule_cleanup(struct net_device *dev)
1651{
1652        struct gfar_private *priv = netdev_priv(dev);
1653        unsigned long flags;
1654
1655        spin_lock_irqsave(&priv->txlock, flags);
1656        spin_lock(&priv->rxlock);
1657
1658        if (napi_schedule_prep(&priv->napi)) {
1659                gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1660                __napi_schedule(&priv->napi);
1661        } else {
1662                /*
1663                 * Clear IEVENT, so interrupts aren't called again
1664                 * because of the packets that have already arrived.
1665                 */
1666                gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1667        }
1668
1669        spin_unlock(&priv->rxlock);
1670        spin_unlock_irqrestore(&priv->txlock, flags);
1671}
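/*
 * Aside: the function above is the usual IRQ-to-NAPI handoff: the
 * first claimant masks the device's interrupts and schedules the
 * poller; later interrupts just acknowledge the events so the line
 * deasserts.  A compilable sketch with hypothetical stand-ins
 * (illustrative only):
 */
#if 0
static int scheduled;
static int try_claim_poller(void) { return !scheduled++; }
static void mask_device_irqs(void) { }
static void ack_device_events(void) { }
static void run_poller_soon(void) { }

static void irq_to_napi_handoff(void)
{
        if (try_claim_poller()) {
                mask_device_irqs();     /* polling now; stop interrupting */
                run_poller_soon();
        } else {
                ack_device_events();    /* poller already running */
        }
}
#endif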
1672
1673/* Interrupt Handler for Transmit complete */
1674static irqreturn_t gfar_transmit(int irq, void *dev_id)
1675{
1676        gfar_schedule_cleanup((struct net_device *)dev_id);
1677        return IRQ_HANDLED;
1678}
1679
1680static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1681                struct sk_buff *skb)
1682{
1683        struct gfar_private *priv = netdev_priv(dev);
1684        u32 lstatus;
1685
1686        bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1687                        priv->rx_buffer_size, DMA_FROM_DEVICE);
1688
1689        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
1690
1691        if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1692                lstatus |= BD_LFLAG(RXBD_WRAP);
1693
1694        eieio();
1695
1696        bdp->lstatus = lstatus;
1697}
1698
1699
1700struct sk_buff *gfar_new_skb(struct net_device *dev)
1701{
1702        unsigned int alignamount;
1703        struct gfar_private *priv = netdev_priv(dev);
1704        struct sk_buff *skb = NULL;
1705
1706        skb = __skb_dequeue(&priv->rx_recycle);
1707        if (!skb)
1708                skb = netdev_alloc_skb(dev,
1709                                priv->rx_buffer_size + RXBUF_ALIGNMENT);
1710
1711        if (!skb)
1712                return NULL;
1713
1714        alignamount = RXBUF_ALIGNMENT -
1715                (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
1716
1717        /* We need the data buffer to be aligned properly.  We will reserve
1718         * as many bytes as needed to align the data properly
1719         */
1720        skb_reserve(skb, alignamount);
1721
1722        return skb;
1723}
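/*
 * Aside: alignamount above is standard power-of-two pointer alignment:
 * mask off the low address bits and reserve the difference.  Note that
 * an already-aligned pointer is advanced by a full RXBUF_ALIGNMENT
 * rather than by zero.  Standalone sketch (illustrative; 64 is only a
 * stand-in for RXBUF_ALIGNMENT):
 */
#if 0
#include <stdint.h>

#define ALIGNMENT 64 /* a power of two */

static unsigned int align_pad(const void *p)
{
        return ALIGNMENT - ((uintptr_t)p & (ALIGNMENT - 1));
}
/* align_pad((void *)0x1000) == 64 (already aligned: full skip);
 * align_pad((void *)0x1001) == 63. */
#endif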
1724
1725static inline void count_errors(unsigned short status, struct net_device *dev)
1726{
1727        struct gfar_private *priv = netdev_priv(dev);
1728        struct net_device_stats *stats = &dev->stats;
1729        struct gfar_extra_stats *estats = &priv->extra_stats;
1730
1731        /* If the packet was truncated, none of the other errors
1732         * matter */
1733        if (status & RXBD_TRUNCATED) {
1734                stats->rx_length_errors++;
1735
1736                estats->rx_trunc++;
1737
1738                return;
1739        }
1740        /* Count the errors, if there were any */
1741        if (status & (RXBD_LARGE | RXBD_SHORT)) {
1742                stats->rx_length_errors++;
1743
1744                if (status & RXBD_LARGE)
1745                        estats->rx_large++;
1746                else
1747                        estats->rx_short++;
1748        }
1749        if (status & RXBD_NONOCTET) {
1750                stats->rx_frame_errors++;
1751                estats->rx_nonoctet++;
1752        }
1753        if (status & RXBD_CRCERR) {
1754                estats->rx_crcerr++;
1755                stats->rx_crc_errors++;
1756        }
1757        if (status & RXBD_OVERRUN) {
1758                estats->rx_overrun++;
1759                stats->rx_over_errors++;
1760        }
1761}
1762
1763irqreturn_t gfar_receive(int irq, void *dev_id)
1764{
1765        gfar_schedule_cleanup((struct net_device *)dev_id);
1766        return IRQ_HANDLED;
1767}
1768
1769static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1770{
1771        /* If valid headers were found, and valid sums
1772         * were verified, then we tell the kernel that no
1773         * checksumming is necessary.  Otherwise, it is */
1774        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1775                skb->ip_summed = CHECKSUM_UNNECESSARY;
1776        else
1777                skb->ip_summed = CHECKSUM_NONE;
1778}
1779
1780
1781/* gfar_process_frame() -- handle one incoming packet if skb
1782 * isn't NULL.  */
1783static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1784                              int amount_pull)
1785{
1786        struct gfar_private *priv = netdev_priv(dev);
1787        struct rxfcb *fcb = NULL;
1788
1789        int ret;
1790
1791        /* The FCB, if present, is at the beginning of the buffer */
1792        fcb = (struct rxfcb *)skb->data;
1793
1794        /* Remove the FCB and padding bytes, if any, from the skb */
1796        if (amount_pull)
1797                skb_pull(skb, amount_pull);
1798
1799        if (priv->rx_csum_enable)
1800                gfar_rx_checksum(skb, fcb);
1801
1802        /* Tell the skb what kind of packet this is */
1803        skb->protocol = eth_type_trans(skb, dev);
1804
1805        /* Send the packet up the stack */
1806        if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1807                ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
1808        else
1809                ret = netif_receive_skb(skb);
1810
1811        if (ret == NET_RX_DROP)
1812                priv->extra_stats.kernel_dropped++;
1813
1814        return 0;
1815}
1816
1817/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1818 *   until the budget/quota has been reached. Returns the number
1819 *   of frames handled
1820 */
1821int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1822{
1823        struct rxbd8 *bdp, *base;
1824        struct sk_buff *skb;
1825        int pkt_len;
1826        int amount_pull;
1827        int howmany = 0;
1828        struct gfar_private *priv = netdev_priv(dev);
1829
1830        /* Get the first full descriptor */
1831        bdp = priv->cur_rx;
1832        base = priv->rx_bd_base;
1833
1834        amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1835                priv->padding;
1836
1837        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1838                struct sk_buff *newskb;
1839                rmb();
1840
1841                /* Add another skb for the future */
1842                newskb = gfar_new_skb(dev);
1843
1844                skb = priv->rx_skbuff[priv->skb_currx];
1845
1846                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1847                                priv->rx_buffer_size, DMA_FROM_DEVICE);
1848
1849                /* We drop the frame if we failed to allocate a new buffer */
1850                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1851                                 bdp->status & RXBD_ERR)) {
1852                        count_errors(bdp->status, dev);
1853
1854                        if (unlikely(!newskb))
1855                                newskb = skb;
1856                        else if (skb) {
1857                                /*
1858                                 * We need to reset ->data to what it
1859                                 * was before gfar_new_skb() re-aligned
1860                                 * it to an RXBUF_ALIGNMENT boundary
1861                                 * before we put the skb back on the
1862                                 * recycle list.
1863                                 */
1864                                skb->data = skb->head + NET_SKB_PAD;
1865                                __skb_queue_head(&priv->rx_recycle, skb);
1866                        }
1867                } else {
1868                        /* Increment the number of packets */
1869                        dev->stats.rx_packets++;
1870                        howmany++;
1871
1872                        if (likely(skb)) {
1873                                /* Remove the FCS from the packet length */
1874                                pkt_len = bdp->length - ETH_FCS_LEN;
1875                                skb_put(skb, pkt_len);
1876                                dev->stats.rx_bytes += pkt_len;
1877
1878                                if (in_irq() || irqs_disabled())
1879                                        printk(KERN_ERR "Interrupt problem!\n");
1880                                gfar_process_frame(dev, skb, amount_pull);
1881
1882                        } else {
1883                                if (netif_msg_rx_err(priv))
1884                                        printk(KERN_WARNING
1885                                               "%s: Missing skb!\n", dev->name);
1886                                dev->stats.rx_dropped++;
1887                                priv->extra_stats.rx_skbmissing++;
1888                        }
1889
1890                }
1891
1892                priv->rx_skbuff[priv->skb_currx] = newskb;
1893
1894                /* Setup the new bdp */
1895                gfar_new_rxbdp(dev, bdp, newskb);
1896
1897                /* Update to the next pointer */
1898                bdp = next_bd(bdp, base, priv->rx_ring_size);
1899
1900                /* update to point at the next skb */
1901                priv->skb_currx =
1902                    (priv->skb_currx + 1) &
1903                    RX_RING_MOD_MASK(priv->rx_ring_size);
1904        }
1905
1906        /* Update the current rxbd pointer to be the next one */
1907        priv->cur_rx = bdp;
1908
1909        return howmany;
1910}
1911
1912static int gfar_poll(struct napi_struct *napi, int budget)
1913{
1914        struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1915        struct net_device *dev = priv->ndev;
1916        int tx_cleaned = 0;
1917        int rx_cleaned = 0;
1918        unsigned long flags;
1919
1920        /* Clear IEVENT, so interrupts aren't called again
1921         * because of the packets that have already arrived */
1922        gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1923
1924        /* If we fail to get the lock, don't bother with the TX BDs */
1925        if (spin_trylock_irqsave(&priv->txlock, flags)) {
1926                tx_cleaned = gfar_clean_tx_ring(dev);
1927                spin_unlock_irqrestore(&priv->txlock, flags);
1928        }
1929
1930        rx_cleaned = gfar_clean_rx_ring(dev, budget);
1931
1932        if (tx_cleaned)
1933                return budget;
1934
1935        if (rx_cleaned < budget) {
1936                napi_complete(napi);
1937
1938                /* Clear the halt bit in RSTAT */
1939                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1940
1941                gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1942
1943                /* If we are coalescing interrupts, update the timer */
1944                /* Otherwise, clear it */
1945                if (likely(priv->rxcoalescing)) {
1946                        gfar_write(&priv->regs->rxic, 0);
1947                        gfar_write(&priv->regs->rxic, priv->rxic);
1948                }
1949                if (likely(priv->txcoalescing)) {
1950                        gfar_write(&priv->regs->txic, 0);
1951                        gfar_write(&priv->regs->txic, priv->txic);
1952                }
1953        }
1954
1955        return rx_cleaned;
1956}
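/*
 * Aside: gfar_poll() above follows the standard NAPI contract: if the
 * budget was exhausted (or TX work remains) it returns the full budget
 * and stays scheduled; otherwise it completes, re-enables interrupts,
 * and returns the work done.  A compilable sketch with hypothetical
 * stand-ins (illustrative only):
 */
#if 0
static int clean_rx(int budget) { return budget / 2; }  /* hypothetical */
static void napi_complete_stub(void) { }                /* hypothetical */
static void unmask_device_irqs(void) { }                /* hypothetical */

static int napi_poll_shape(int budget)
{
        int done = clean_rx(budget);    /* handle at most 'budget' frames */

        if (done < budget) {
                /* ring drained: leave polled mode, unmask interrupts */
                napi_complete_stub();
                unmask_device_irqs();
        }
        return done;                    /* never more than budget */
}
#endif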
1957
1958#ifdef CONFIG_NET_POLL_CONTROLLER
1959/*
1960 * Polling 'interrupt' - used by things like netconsole to send skbs
1961 * without having to re-enable interrupts. It's not called while
1962 * the interrupt routine is executing.
1963 */
1964static void gfar_netpoll(struct net_device *dev)
1965{
1966        struct gfar_private *priv = netdev_priv(dev);
1967
1968        /* If the device has multiple interrupts, run tx/rx */
1969        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1970                disable_irq(priv->interruptTransmit);
1971                disable_irq(priv->interruptReceive);
1972                disable_irq(priv->interruptError);
1973                gfar_interrupt(priv->interruptTransmit, dev);
1974                enable_irq(priv->interruptError);
1975                enable_irq(priv->interruptReceive);
1976                enable_irq(priv->interruptTransmit);
1977        } else {
1978                disable_irq(priv->interruptTransmit);
1979                gfar_interrupt(priv->interruptTransmit, dev);
1980                enable_irq(priv->interruptTransmit);
1981        }
1982}
1983#endif
1984
1985/* The interrupt handler for devices with one interrupt */
1986static irqreturn_t gfar_interrupt(int irq, void *dev_id)
1987{
1988        struct net_device *dev = dev_id;
1989        struct gfar_private *priv = netdev_priv(dev);
1990
1991        /* Save ievent for future reference */
1992        u32 events = gfar_read(&priv->regs->ievent);
1993
1994        /* Check for reception */
1995        if (events & IEVENT_RX_MASK)
1996                gfar_receive(irq, dev_id);
1997
1998        /* Check for transmit completion */
1999        if (events & IEVENT_TX_MASK)
2000                gfar_transmit(irq, dev_id);
2001
2002        /* Check for errors */
2003        if (events & IEVENT_ERR_MASK)
2004                gfar_error(irq, dev_id);
2005
2006        return IRQ_HANDLED;
2007}
2008
2009/* Called every time the controller might need to be made
2010 * aware of new link state.  The PHY code conveys this
2011 * information through variables in the phydev structure, and this
2012 * function converts those variables into the appropriate
2013 * register values, and can bring down the device if needed.
2014 */
2015static void adjust_link(struct net_device *dev)
2016{
2017        struct gfar_private *priv = netdev_priv(dev);
2018        struct gfar __iomem *regs = priv->regs;
2019        unsigned long flags;
2020        struct phy_device *phydev = priv->phydev;
2021        int new_state = 0;
2022
2023        spin_lock_irqsave(&priv->txlock, flags);
2024        if (phydev->link) {
2025                u32 tempval = gfar_read(&regs->maccfg2);
2026                u32 ecntrl = gfar_read(&regs->ecntrl);
2027
2028                /* Now we make sure that we can be in full duplex mode.
2029                 * If not, we operate in half-duplex mode. */
2030                if (phydev->duplex != priv->oldduplex) {
2031                        new_state = 1;
2032                        if (!(phydev->duplex))
2033                                tempval &= ~(MACCFG2_FULL_DUPLEX);
2034                        else
2035                                tempval |= MACCFG2_FULL_DUPLEX;
2036
2037                        priv->oldduplex = phydev->duplex;
2038                }
2039
2040                if (phydev->speed != priv->oldspeed) {
2041                        new_state = 1;
2042                        switch (phydev->speed) {
2043                        case 1000:
2044                                tempval =
2045                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2046
2047                                ecntrl &= ~(ECNTRL_R100);
2048                                break;
2049                        case 100:
2050                        case 10:
2051                                tempval =
2052                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2053
2054                                /* Reduced mode distinguishes
2055                                 * between 10 and 100 */
2056                                if (phydev->speed == SPEED_100)
2057                                        ecntrl |= ECNTRL_R100;
2058                                else
2059                                        ecntrl &= ~(ECNTRL_R100);
2060                                break;
2061                        default:
2062                                if (netif_msg_link(priv))
2063                                        printk(KERN_WARNING
2064                                                "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
2065                                                dev->name, phydev->speed);
2066                                break;
2067                        }
2068
2069                        priv->oldspeed = phydev->speed;
2070                }
2071
2072                gfar_write(&regs->maccfg2, tempval);
2073                gfar_write(&regs->ecntrl, ecntrl);
2074
2075                if (!priv->oldlink) {
2076                        new_state = 1;
2077                        priv->oldlink = 1;
2078                }
2079        } else if (priv->oldlink) {
2080                new_state = 1;
2081                priv->oldlink = 0;
2082                priv->oldspeed = 0;
2083                priv->oldduplex = -1;
2084        }
2085
2086        if (new_state && netif_msg_link(priv))
2087                phy_print_status(phydev);
2088
2089        spin_unlock_irqrestore(&priv->txlock, flags);
2090}
2091
2092/* Update the hash table based on the current list of multicast
2093 * addresses we subscribe to.  Also, change the promiscuity of
2094 * the device based on the flags (this function is called
2095 * whenever dev->flags is changed) */
2096static void gfar_set_multi(struct net_device *dev)
2097{
2098        struct dev_mc_list *mc_ptr;
2099        struct gfar_private *priv = netdev_priv(dev);
2100        struct gfar __iomem *regs = priv->regs;
2101        u32 tempval;
2102
2103        if (dev->flags & IFF_PROMISC) {
2104                /* Set RCTRL to PROM */
2105                tempval = gfar_read(&regs->rctrl);
2106                tempval |= RCTRL_PROM;
2107                gfar_write(&regs->rctrl, tempval);
2108        } else {
2109                /* Set RCTRL to not PROM */
2110                tempval = gfar_read(&regs->rctrl);
2111                tempval &= ~(RCTRL_PROM);
2112                gfar_write(&regs->rctrl, tempval);
2113        }
2114
2115        if (dev->flags & IFF_ALLMULTI) {
2116                /* Set the hash to rx all multicast frames */
2117                gfar_write(&regs->igaddr0, 0xffffffff);
2118                gfar_write(&regs->igaddr1, 0xffffffff);
2119                gfar_write(&regs->igaddr2, 0xffffffff);
2120                gfar_write(&regs->igaddr3, 0xffffffff);
2121                gfar_write(&regs->igaddr4, 0xffffffff);
2122                gfar_write(&regs->igaddr5, 0xffffffff);
2123                gfar_write(&regs->igaddr6, 0xffffffff);
2124                gfar_write(&regs->igaddr7, 0xffffffff);
2125                gfar_write(&regs->gaddr0, 0xffffffff);
2126                gfar_write(&regs->gaddr1, 0xffffffff);
2127                gfar_write(&regs->gaddr2, 0xffffffff);
2128                gfar_write(&regs->gaddr3, 0xffffffff);
2129                gfar_write(&regs->gaddr4, 0xffffffff);
2130                gfar_write(&regs->gaddr5, 0xffffffff);
2131                gfar_write(&regs->gaddr6, 0xffffffff);
2132                gfar_write(&regs->gaddr7, 0xffffffff);
2133        } else {
2134                int em_num;
2135                int idx;
2136
2137                /* zero out the hash */
2138                gfar_write(&regs->igaddr0, 0x0);
2139                gfar_write(&regs->igaddr1, 0x0);
2140                gfar_write(&regs->igaddr2, 0x0);
2141                gfar_write(&regs->igaddr3, 0x0);
2142                gfar_write(&regs->igaddr4, 0x0);
2143                gfar_write(&regs->igaddr5, 0x0);
2144                gfar_write(&regs->igaddr6, 0x0);
2145                gfar_write(&regs->igaddr7, 0x0);
2146                gfar_write(&regs->gaddr0, 0x0);
2147                gfar_write(&regs->gaddr1, 0x0);
2148                gfar_write(&regs->gaddr2, 0x0);
2149                gfar_write(&regs->gaddr3, 0x0);
2150                gfar_write(&regs->gaddr4, 0x0);
2151                gfar_write(&regs->gaddr5, 0x0);
2152                gfar_write(&regs->gaddr6, 0x0);
2153                gfar_write(&regs->gaddr7, 0x0);
2154
2155                /* If we have extended hash tables, we need to
2156                 * clear the exact match registers to prepare for
2157                 * setting them */
2158                if (priv->extended_hash) {
2159                        em_num = GFAR_EM_NUM + 1;
2160                        gfar_clear_exact_match(dev);
2161                        idx = 1;
2162                } else {
2163                        idx = 0;
2164                        em_num = 0;
2165                }
2166
2167                if (dev->mc_count == 0)
2168                        return;
2169
2170                /* Parse the list, and set the appropriate bits */
2171                for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
2172                        if (idx < em_num) {
2173                                gfar_set_mac_for_addr(dev, idx,
2174                                                mc_ptr->dmi_addr);
2175                                idx++;
2176                        } else
2177                                gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
2178                }
2179        }
2182}
2183
2184
2185/* Clears each of the exact match registers to zero, so they
2186 * don't interfere with normal reception */
2187static void gfar_clear_exact_match(struct net_device *dev)
2188{
2189        int idx;
2190        u8 zero_arr[MAC_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2191
2192        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2193                gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2194}
2195
2196/* Set the appropriate hash bit for the given addr */
2197/* The algorithm works like so:
2198 * 1) Take the Destination Address (i.e. the multicast address), and
2199 * do a CRC on it (little endian), and reverse the bits of the
2200 * result.
2201 * 2) Use the 8 most significant bits as a hash into a 256-entry
2202 * table.  The table is controlled through 8 32-bit registers:
2203 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
2204 * entry 255.  This means that the 3 most significant bits of the
2205 * hash index select which gaddr register to use, and the other 5
2206 * bits indicate which bit (assuming an IBM numbering scheme, which
2207 * for PowerPC (tm) is usually the case) in the register holds
2208 * the entry. */
2209static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2210{
2211        u32 tempval;
2212        struct gfar_private *priv = netdev_priv(dev);
2213        u32 result = ether_crc(MAC_ADDR_LEN, addr);
2214        int width = priv->hash_width;
2215        u8 whichbit = (result >> (32 - width)) & 0x1f;
2216        u8 whichreg = result >> (32 - width + 5);
2217        u32 value = (1 << (31 - whichbit));
2218
2219        tempval = gfar_read(priv->hash_regs[whichreg]);
2220        tempval |= value;
2221        gfar_write(priv->hash_regs[whichreg], tempval);
2224}
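/*
 * Aside: a worked example of the mapping above, assuming the full
 * 8-bit hash width (256-entry table).  If the bit-reversed CRC is
 * 0xb4000000 (top byte 1011 0100b), the top 3 bits (101b = 5) select
 * gaddr5, and the next 5 bits (10100b = 20) select IBM bit 20, i.e.
 * host bit (31 - 20) = 11.  Standalone sketch (illustrative only):
 */
#if 0
#include <stdint.h>

static void hash_example(uint32_t crc, int width,
                unsigned int *reg, uint32_t *bit)
{
        unsigned int whichbit = (crc >> (32 - width)) & 0x1f;

        *reg = crc >> (32 - width + 5);
        *bit = 1u << (31 - whichbit);
}
/* hash_example(0xb4000000, 8, &reg, &bit) yields reg == 5 and
 * bit == 1 << 11. */
#endif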
2225
2226
2227/* There are multiple MAC Address register pairs on some controllers
2228 * This function sets the numth pair to a given address
2229 */
2230static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2231{
2232        struct gfar_private *priv = netdev_priv(dev);
2233        int idx;
2234        char tmpbuf[8] = { 0 };	/* padded so reading tmpbuf+4 as a u32 stays in bounds */
2235        u32 tempval;
2236        u32 __iomem *macptr = &priv->regs->macstnaddr1;
2237
2238        macptr += num*2;
2239
2240        /* Now copy it into the MAC registers backwards, since the
2241         * controller expects the address bytes in reversed order */
2242        for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2243                tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
2244
2245        gfar_write(macptr, *((u32 *) (tmpbuf)));
2246
2247        tempval = *((u32 *) (tmpbuf + 4));
2248
2249        gfar_write(macptr+1, tempval);
2250}
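/*
 * Aside: a worked example of the reversal above.  For the (made-up)
 * address 00:04:9f:01:02:03 the reversed buffer is
 * { 03 02 01 9f 04 00 }, so the controller sees
 * macstnaddr1 = 0x0302019f and macstnaddr2 = 0x04000000 when the
 * words are read big-endian.  Standalone sketch that assembles the
 * words explicitly, so it is host-endianness independent
 * (illustrative only):
 */
#if 0
#include <stdint.h>

static void mac_to_regs(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
        uint8_t buf[8] = { 0 };         /* padded like tmpbuf above */
        int i;

        for (i = 0; i < 6; i++)
                buf[5 - i] = addr[i];

        *lo = (uint32_t)buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
        *hi = (uint32_t)buf[4] << 24 | buf[5] << 16 | buf[6] << 8 | buf[7];
}
#endif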
2251
2252/* GFAR error interrupt handler */
2253static irqreturn_t gfar_error(int irq, void *dev_id)
2254{
2255        struct net_device *dev = dev_id;
2256        struct gfar_private *priv = netdev_priv(dev);
2257
2258        /* Save ievent for future reference */
2259        u32 events = gfar_read(&priv->regs->ievent);
2260
2261        /* Clear IEVENT */
2262        gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
2263
2264        /* Magic Packet is not an error. */
2265        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2266            (events & IEVENT_MAG))
2267                events &= ~IEVENT_MAG;
2268
2269        /* Log the error event for debugging */
2270        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2271                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2272                       dev->name, events, gfar_read(&priv->regs->imask));
2273
2274        /* Update the error counters */
2275        if (events & IEVENT_TXE) {
2276                dev->stats.tx_errors++;
2277
2278                if (events & IEVENT_LC)
2279                        dev->stats.tx_window_errors++;
2280                if (events & IEVENT_CRL)
2281                        dev->stats.tx_aborted_errors++;
2282                if (events & IEVENT_XFUN) {
2283                        if (netif_msg_tx_err(priv))
2284                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
2285                                       "packet dropped.\n", dev->name);
2286                        dev->stats.tx_dropped++;
2287                        priv->extra_stats.tx_underrun++;
2288
2289                        /* Reactivate the Tx Queues */
2290                        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
2291                }
2292                if (netif_msg_tx_err(priv))
2293                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
2294        }
2295        if (events & IEVENT_BSY) {
2296                dev->stats.rx_errors++;
2297                priv->extra_stats.rx_bsy++;
2298
2299                gfar_receive(irq, dev_id);
2300
2301                if (netif_msg_rx_err(priv))
2302                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2303                               dev->name, gfar_read(&priv->regs->rstat));
2304        }
2305        if (events & IEVENT_BABR) {
2306                dev->stats.rx_errors++;
2307                priv->extra_stats.rx_babr++;
2308
2309                if (netif_msg_rx_err(priv))
2310                        printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2311        }
2312        if (events & IEVENT_EBERR) {
2313                priv->extra_stats.eberr++;
2314                if (netif_msg_rx_err(priv))
2315                        printk(KERN_DEBUG "%s: bus error\n", dev->name);
2316        }
2317        if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
2318                printk(KERN_DEBUG "%s: control frame\n", dev->name);
2319
2320        if (events & IEVENT_BABT) {
2321                priv->extra_stats.tx_babt++;
2322                if (netif_msg_tx_err(priv))
2323                        printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
2324        }
2325        return IRQ_HANDLED;
2326}
2327
2328static struct of_device_id gfar_match[] = {
2330        {
2331                .type = "network",
2332                .compatible = "gianfar",
2333        },
2334        {},
2335};
2336MODULE_DEVICE_TABLE(of, gfar_match);
2337
2338/* Structure for a device driver */
2339static struct of_platform_driver gfar_driver = {
2340        .name = "fsl-gianfar",
2341        .match_table = gfar_match,
2342
2343        .probe = gfar_probe,
2344        .remove = gfar_remove,
2345        .suspend = gfar_suspend,
2346        .resume = gfar_resume,
2347};
2348
2349static int __init gfar_init(void)
2350{
2351        return of_register_platform_driver(&gfar_driver);
2352}
2353
2354static void __exit gfar_exit(void)
2355{
2356        of_unregister_platform_driver(&gfar_driver);
2357}
2358
2359module_init(gfar_init);
2360module_exit(gfar_exit);