linux/drivers/net/gianfar.c
/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
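
/*
 * Note on descriptor layout: each buffer descriptor packs its status flags
 * and data length into a single 32-bit "lstatus" word, flags in the upper
 * half-word and length in the lower one (the BD_LFLAG() macro in gianfar.h
 * shifts flag bits into the upper half).  That is why the code below often
 * builds lstatus with BD_LFLAG() and writes it with a single store.
 */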

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                              int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = buf;

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

        /* Order the bufPtr store before marking the descriptor empty, so
         * the controller never sees an empty BD with a stale buffer
         * pointer. */
        eieio();

        bdp->lstatus = lstatus;
}

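/*
 * (Re)initialize the descriptor rings to a clean state.  RX skbs that are
 * already allocated are kept and simply re-armed; missing ones are
 * allocated fresh.  Besides the initial bring-up, this also runs on resume
 * (see gfar_restore()), where the previously allocated skbs are still
 * around.
 */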
static int gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        int i, j;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status |= TXBD_WRAP;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->cur_rx = rx_queue->rx_bd_base;
                rx_queue->skb_currx = 0;
                rxbdp = rx_queue->rx_bd_base;

                for (j = 0; j < rx_queue->rx_ring_size; j++) {
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];

                        if (skb) {
                                gfar_init_rxbdp(rx_queue, rxbdp,
                                                rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev);
                                if (!skb) {
                                        pr_err("%s: Can't allocate RX buffers\n",
                                               ndev->name);
                                        goto err_rxalloc_fail;
                                }
                                rx_queue->rx_skbuff[j] = skb;

                                gfar_new_rxbdp(rx_queue, rxbdp, skb);
                        }

                        rxbdp++;
                }
        }

        return 0;

err_rxalloc_fail:
        free_skb_resources(priv);
        return -ENOMEM;
}

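/*
 * All TX and RX descriptor rings live in a single dma_alloc_coherent()
 * region; vaddr/addr below walk through it in lockstep, handing each queue
 * its CPU-side view and the matching bus address for the controller.
 */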
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = &priv->ofdev->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                        sizeof(struct txbd8) * priv->total_tx_ring_size +
                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
                        &addr, GFP_KERNEL);
        if (!vaddr) {
                if (netif_msg_ifup(priv))
                        pr_err("%s: Could not allocate buffer descriptors!\n",
                               ndev->name);
                return -ENOMEM;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
                addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
                                  tx_queue->tx_ring_size, GFP_KERNEL);
                if (!tx_queue->tx_skbuff) {
                        if (netif_msg_ifup(priv))
                                pr_err("%s: Could not allocate tx_skbuff\n",
                                       ndev->name);
                        goto cleanup;
                }

                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
                                  rx_queue->rx_ring_size, GFP_KERNEL);

                if (!rx_queue->rx_skbuff) {
                        if (netif_msg_ifup(priv))
                                pr_err("%s: Could not allocate rx_skbuff\n",
                                       ndev->name);
                        goto cleanup;
                }

                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
        }

        if (gfar_init_bds(ndev))
                goto cleanup;

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

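/*
 * Program the per-queue ring base registers.  The tbaseN/rbaseN registers
 * sit 8 bytes apart in the register map, hence the "baddr += 2" stride on
 * a u32 pointer.
 */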
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr += 2;
        }
}

static void gfar_init_mac(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;
        u32 tctrl = 0;
        u32 attrs = 0;

        /* write the tx/rx base registers */
        gfar_init_tx_rx_base(priv);

        /* Configure the coalescing support */
        gfar_configure_coalescing(priv, 0xFF, 0xFF);

        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
                gfar_write(&regs->rir0, DEFAULT_RIR0);
        }

        if (ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(ndev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Insert receive time stamps into padding alignment bytes */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(8);
                priv->padding = 8;
        }

        /* Enable HW time stamping if requested from user space */
        if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

        /* keep vlan related bits if it's enabled */
        if (priv->vlgrp) {
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
                tctrl |= TCTRL_VLINS;
        }

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);

        if (ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        tctrl |= TCTRL_TXSCHED_PRIO;

        gfar_write(&regs->tctrl, tctrl);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&regs->attreli, attrs);

 416        /* Start with defaults, and add stashing or locking
 417         * depending on the approprate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&regs->attr, attrs);

        gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
        unsigned long tx_packets = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_packets += priv->rx_queue[i]->stats.rx_packets;
                rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }

        dev->stats.rx_packets = rx_packets;
        dev->stats.rx_bytes = rx_bytes;
        dev->stats.rx_dropped = rx_dropped;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
                tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }

        dev->stats.tx_bytes = tx_bytes;
        dev->stats.tx_packets = tx_packets;

        return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_features = gfar_set_features,
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_get_stats = gfar_get_stats,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB (Frame Control Block), i.e. when
 * VLAN extraction, RX checksum offload or RX timestamping is enabled */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < MAXGROUPS; i++)
                if (priv->gfargrp[i].regs)
                        iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++)
                napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++)
                napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
                struct gfar_private *priv, const char *model)
{
        u32 *queue_mask;

        priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
        if (!priv->gfargrp[priv->num_grps].regs)
                return -ENOMEM;

        priv->gfargrp[priv->num_grps].interruptTransmit =
                        irq_of_parse_and_map(np, 0);

        /* If we aren't the FEC we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                priv->gfargrp[priv->num_grps].interruptReceive =
                        irq_of_parse_and_map(np, 1);
                priv->gfargrp[priv->num_grps].interruptError =
                        irq_of_parse_and_map(np, 2);
                if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
                    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
                    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
                        return -EINVAL;
        }

        priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
        priv->gfargrp[priv->num_grps].priv = priv;
        spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
        if (priv->mode == MQ_MG_MODE) {
                queue_mask = (u32 *)of_get_property(np,
                                        "fsl,rx-bit-map", NULL);
                priv->gfargrp[priv->num_grps].rx_bit_map =
                        queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
                queue_mask = (u32 *)of_get_property(np,
                                        "fsl,tx-bit-map", NULL);
                priv->gfargrp[priv->num_grps].tx_bit_map =
                        queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
        } else {
                priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
                priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
        }
        priv->num_grps++;

        return 0;
}

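/*
 * For illustration only (the property names match the parsing below; this
 * is a sketch, not copied from a real board file), the device tree node
 * looks roughly like:
 *
 *      ethernet@24000 {
 *              model = "eTSEC";
 *              fsl,num_tx_queues = <1>;
 *              fsl,num_rx_queues = <1>;
 *              local-mac-address = [ 00 04 9f 00 00 01 ];
 *              phy-handle = <&phy0>;
 *              phy-connection-type = "rgmii-id";
 *      };
 */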
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;
        unsigned int num_tx_qs, num_rx_qs;
        u32 *tx_queues, *rx_queues;

        if (!np || !of_device_is_available(np))
                return -ENODEV;

        /* parse the num of tx and rx queues */
        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
        num_tx_qs = tx_queues ? *tx_queues : 1;

        if (num_tx_qs > MAX_TX_QS) {
                printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                                num_tx_qs, MAX_TX_QS);
                printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
        num_rx_qs = rx_queues ? *rx_queues : 1;

        if (num_rx_qs > MAX_RX_QS) {
                printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                                num_rx_qs, MAX_RX_QS);
                printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (!dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->node = ofdev->dev.of_node;
        priv->ndev = dev;

        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;
        priv->num_grps = 0x0;

        model = of_get_property(np, "model", NULL);

        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;

        /* Parse and initialize group specific information */
        if (of_device_is_compatible(np, "fsl,etsec2")) {
                priv->mode = MQ_MG_MODE;
                for_each_child_of_node(np, child) {
                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
                }
        } else {
                priv->mode = SQ_SG_MODE;
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }

        for (i = 0; i < priv->num_tx_queues; i++)
                priv->tx_queue[i] = NULL;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->rx_queue[i] = NULL;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
                        goto tx_alloc_failed;
                }
                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = dev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
                        goto rx_alloc_failed;
                }
                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = dev;
                spin_lock_init(&(priv->rx_queue[i]->rxlock));
        }

        stash = of_get_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        stash_len = of_get_property(np, "rx-stash-len", NULL);

        if (stash_len)
                priv->rx_stash_size = *stash_len;

        stash_idx = of_get_property(np, "rx-stash-idx", NULL);

        if (stash_idx)
                priv->rx_stash_index = *stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR;
        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                        FSL_GIANFAR_DEV_HAS_PADDING |
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                        FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                        FSL_GIANFAR_DEV_HAS_TIMER;

        ctype = of_get_property(np, "phy-connection-type", NULL);

        /* We only care about rgmii-id.  The rest are autodetected */
        if (ctype && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_get_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

rx_alloc_failed:
        free_rx_pointers(priv);
tx_alloc_failed:
        free_tx_pointers(priv);
err_grp_init:
        unmap_group_regs(priv);
        free_netdev(dev);
        return err;
}

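/*
 * SIOCSHWTSTAMP handler.  Toggling RX timestamping stops and restarts the
 * controller so that the RCTRL timestamp bits (see gfar_init_mac()) get
 * reprogrammed to match the new hwts_rx_en setting.
 */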
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
                        struct ifreq *ifr, int cmd)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->hwts_tx_en = 0;
                break;
        case HWTSTAMP_TX_ON:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                priv->hwts_tx_en = 1;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
                        stop_gfar(netdev);
                        priv->hwts_rx_en = 0;
                        startup_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
                        stop_gfar(netdev);
                        priv->hwts_rx_en = 1;
                        startup_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd == SIOCSHWTSTAMP)
                return gfar_hwtstamp_ioctl(dev, rq, cmd);

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, rq, cmd);
}

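/*
 * Mirror the low max_qs bits of bit_map, e.g. for max_qs = 8:
 * reverse_bitmap(0x01, 8) == 0x80 and reverse_bitmap(0x03, 8) == 0xC0.
 */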
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
        unsigned int new_bit_map = 0x0;
        int mask = 0x1 << (max_qs - 1), i;

        for (i = 0; i < max_qs; i++) {
                if (bit_map & mask)
                        new_bit_map = new_bit_map + (1 << i);
                mask = mask >> 0x1;
        }
        return new_bit_map;
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                                   u32 class)
{
        u32 rqfpr = FPR_FILER_MASK;
        u32 rqfcr = 0x0;

        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        return rqfar;
}

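/*
 * Populate the RX filer with a default rule set: a catch-all match at the
 * highest index, a cluster of entries per traffic class (IPv4/IPv6 with
 * TCP/UDP variants) below it, and no-match entries filling the remaining
 * lower slots.
 */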
static void gfar_init_filer_table(struct gfar_private *priv)
{
        int i;
        u32 rqfar = MAX_FILER_IDX;
        u32 rqfcr = 0x0;
        u32 rqfpr = FPR_FILER_MASK;

        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

        /* cur_filer_idx indicates the first non-masked rule */
        priv->cur_filer_idx = rqfar;

        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
                priv->ftp_rqfcr[i] = rqfcr;
                priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
}

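/*
 * Identify silicon revisions that need errata workarounds by decoding the
 * Processor Version Register (PVR) and System Version Register (SVR);
 * "mod" masks off the security ("E") suffix encoding so that, e.g.,
 * MPC8313 and MPC8313E take the same branch.
 */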
static void gfar_detect_errata(struct gfar_private *priv)
{
        struct device *dev = &priv->ofdev->dev;
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
        unsigned int rev = svr & 0xffff;

        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
                        (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
                        (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
                        (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_A002;

        /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
                        (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
                priv->errata |= GFAR_ERRATA_12;

        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
                         priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gfar __iomem *regs = NULL;
        int err = 0, i, grp_idx = 0;
        int len_devname;
        u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
        u32 isrg = 0;
        u32 __iomem *baddr;

        err = gfar_of_init(ofdev, &dev);

        if (err)
                return err;

        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
        priv->node = ofdev->dev.of_node;
        SET_NETDEV_DEV(dev, &ofdev->dev);

        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);

        dev_set_drvdata(&ofdev->dev, priv);
        regs = priv->gfargrp[0].regs;

        gfar_detect_errata(priv);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        gfar_halt(dev);

        /* Reset MAC layer */
        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

        /* We need to delay at least 3 TX clocks */
        udelay(2);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;
        if (gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
        gfar_write(&regs->maccfg2, tempval);

        /* Initialize ECNTRL */
        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) regs;

        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->mtu = 1500;
        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;

        /* Register one NAPI context per interrupt group */
        for (i = 0; i < priv->num_grps; i++)
                netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                        NETIF_F_RXCSUM;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
                        NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
        }

        priv->vlgrp = NULL;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &regs->igaddr0;
                priv->hash_regs[1] = &regs->igaddr1;
                priv->hash_regs[2] = &regs->igaddr2;
                priv->hash_regs[3] = &regs->igaddr3;
                priv->hash_regs[4] = &regs->igaddr4;
                priv->hash_regs[5] = &regs->igaddr5;
                priv->hash_regs[6] = &regs->igaddr6;
                priv->hash_regs[7] = &regs->igaddr7;
                priv->hash_regs[8] = &regs->gaddr0;
                priv->hash_regs[9] = &regs->gaddr1;
                priv->hash_regs[10] = &regs->gaddr2;
                priv->hash_regs[11] = &regs->gaddr3;
                priv->hash_regs[12] = &regs->gaddr4;
                priv->hash_regs[13] = &regs->gaddr5;
                priv->hash_regs[14] = &regs->gaddr6;
                priv->hash_regs[15] = &regs->gaddr7;
        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &regs->gaddr0;
                priv->hash_regs[1] = &regs->gaddr1;
                priv->hash_regs[2] = &regs->gaddr2;
                priv->hash_regs[3] = &regs->gaddr3;
                priv->hash_regs[4] = &regs->gaddr4;
                priv->hash_regs[5] = &regs->gaddr5;
                priv->hash_regs[6] = &regs->gaddr6;
                priv->hash_regs[7] = &regs->gaddr7;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM ||
                        priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->hard_header_len += GMAC_FCB_LEN;

        /* Program the isrg regs only if number of grps > 1 */
        if (priv->num_grps > 1) {
                baddr = &regs->isrg0;
                for (i = 0; i < priv->num_grps; i++) {
                        isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
                        isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
                        gfar_write(baddr, isrg);
                        baddr++;
                        isrg = 0x0;
                }
        }

        /* Need to reverse the bit maps as bit_map's MSB is q0
         * but, for_each_set_bit parses from right to left, which
         * basically reverses the queue numbers */
        for (i = 0; i < priv->num_grps; i++) {
                priv->gfargrp[i].tx_bit_map = reverse_bitmap(
                                priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
                priv->gfargrp[i].rx_bit_map = reverse_bitmap(
                                priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
        }

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         * also assign queues to groups */
        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
                for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
                                priv->num_rx_queues) {
                        priv->gfargrp[grp_idx].num_rx_queues++;
                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                }
                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
                for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
                                priv->num_tx_queues) {
                        priv->gfargrp[grp_idx].num_tx_queues++;
                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        tstat = tstat | (TSTAT_CLEAR_THALT >> i);
                        tqueue = tqueue | (TQUEUE_EN0 >> i);
                }
                priv->gfargrp[grp_idx].rstat = rstat;
                priv->gfargrp[grp_idx].tstat = tstat;
                rstat = tstat = 0;
        }

        gfar_write(&regs->rqueue, rqueue);
        gfar_write(&regs->tqueue, tqueue);

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
                priv->tx_queue[i]->txic = DEFAULT_TXIC;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }

        /* enable filer if using multiple RX queues */
        if (priv->num_rx_queues > 1)
                priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        device_init_wakeup(&dev->dev,
                priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        /* Fill out the IRQ name fields; with multiple interrupts per group
         * the names come out as e.g. "eth0_g0_tx", "eth0_g0_rx", "eth0_g0_er" */
        len_devname = strlen(dev->name);
        for (i = 0; i < priv->num_grps; i++) {
                strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
                                len_devname);
                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                        strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
                                "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_tx[
                                strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
                        strncpy(&priv->gfargrp[i].int_name_tx[strlen(
                                priv->gfargrp[i].int_name_tx)],
                                "_tx", sizeof("_tx") + 1);

                        strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
                                        len_devname);
                        strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
                                        "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_rx[
                                strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
                        strncpy(&priv->gfargrp[i].int_name_rx[strlen(
                                priv->gfargrp[i].int_name_rx)],
                                "_rx", sizeof("_rx") + 1);

                        strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
                                        len_devname);
                        strncpy(&priv->gfargrp[i].int_name_er[len_devname],
                                "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_er[strlen(
                                        priv->gfargrp[i].int_name_er)] = '0' + i;
                        strncpy(&priv->gfargrp[i].int_name_er[strlen(
                                priv->gfargrp[i].int_name_er)],
                                "_er", sizeof("_er") + 1);
                } else
                        priv->gfargrp[i].int_name_tx[len_devname] = '\0';
        }

        /* Initialize the filer table */
        gfar_init_filer_table(priv);

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        for (i = 0; i < priv->num_rx_queues; i++)
                printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->rx_queue[i]->rx_ring_size);
        for (i = 0; i < priv->num_tx_queues; i++)
                printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->tx_queue[i]->tx_ring_size);

        return 0;

register_fail:
        unmap_group_regs(priv);
        free_tx_pointers(priv);
        free_rx_pointers(priv);
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(priv->ndev);
        unmap_group_regs(priv);
        free_netdev(priv->ndev);

        return 0;
}

1259#ifdef CONFIG_PM
1260
1261static int gfar_suspend(struct device *dev)
1262{
1263        struct gfar_private *priv = dev_get_drvdata(dev);
1264        struct net_device *ndev = priv->ndev;
1265        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1266        unsigned long flags;
1267        u32 tempval;
1268
1269        int magic_packet = priv->wol_en &&
1270                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1271
1272        netif_device_detach(ndev);
1273
1274        if (netif_running(ndev)) {
1275
1276                local_irq_save(flags);
1277                lock_tx_qs(priv);
1278                lock_rx_qs(priv);
1279
1280                gfar_halt_nodisable(ndev);
1281
1282                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1283                tempval = gfar_read(&regs->maccfg1);
1284
1285                tempval &= ~MACCFG1_TX_EN;
1286
1287                if (!magic_packet)
1288                        tempval &= ~MACCFG1_RX_EN;
1289
1290                gfar_write(&regs->maccfg1, tempval);
1291
1292                unlock_rx_qs(priv);
1293                unlock_tx_qs(priv);
1294                local_irq_restore(flags);
1295
1296                disable_napi(priv);
1297
1298                if (magic_packet) {
1299                        /* Enable interrupt on Magic Packet */
1300                        gfar_write(&regs->imask, IMASK_MAG);
1301
1302                        /* Enable Magic Packet mode */
1303                        tempval = gfar_read(&regs->maccfg2);
1304                        tempval |= MACCFG2_MPEN;
1305                        gfar_write(&regs->maccfg2, tempval);
1306                } else {
1307                        phy_stop(priv->phydev);
1308                }
1309        }
1310
1311        return 0;
1312}
1313
1314static int gfar_resume(struct device *dev)
1315{
1316        struct gfar_private *priv = dev_get_drvdata(dev);
1317        struct net_device *ndev = priv->ndev;
1318        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1319        unsigned long flags;
1320        u32 tempval;
1321        int magic_packet = priv->wol_en &&
1322                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1323
1324        if (!netif_running(ndev)) {
1325                netif_device_attach(ndev);
1326                return 0;
1327        }
1328
1329        if (!magic_packet && priv->phydev)
1330                phy_start(priv->phydev);
1331
1332        /* Disable Magic Packet mode, in case something
1333         * else woke us up.
1334         */
1335        local_irq_save(flags);
1336        lock_tx_qs(priv);
1337        lock_rx_qs(priv);
1338
1339        tempval = gfar_read(&regs->maccfg2);
1340        tempval &= ~MACCFG2_MPEN;
1341        gfar_write(&regs->maccfg2, tempval);
1342
1343        gfar_start(ndev);
1344
1345        unlock_rx_qs(priv);
1346        unlock_tx_qs(priv);
1347        local_irq_restore(flags);
1348
1349        netif_device_attach(ndev);
1350
1351        enable_napi(priv);
1352
1353        return 0;
1354}
1355
1356static int gfar_restore(struct device *dev)
1357{
1358        struct gfar_private *priv = dev_get_drvdata(dev);
1359        struct net_device *ndev = priv->ndev;
1360
1361        if (!netif_running(ndev))
1362                return 0;
1363
1364        gfar_init_bds(ndev);
1365        init_registers(ndev);
1366        gfar_set_mac_address(ndev);
1367        gfar_init_mac(ndev);
1368        gfar_start(ndev);
1369
1370        priv->oldlink = 0;
1371        priv->oldspeed = 0;
1372        priv->oldduplex = -1;
1373
1374        if (priv->phydev)
1375                phy_start(priv->phydev);
1376
1377        netif_device_attach(ndev);
1378        enable_napi(priv);
1379
1380        return 0;
1381}
1382
1383static struct dev_pm_ops gfar_pm_ops = {
1384        .suspend = gfar_suspend,
1385        .resume = gfar_resume,
1386        .freeze = gfar_suspend,
1387        .thaw = gfar_resume,
1388        .restore = gfar_restore,
1389};
1390
1391#define GFAR_PM_OPS (&gfar_pm_ops)
1392
1393#else
1394
1395#define GFAR_PM_OPS NULL
1396
1397#endif
1398
1399/* Reads the controller's registers to determine what interface
1400 * connects it to the PHY.
1401 */
1402static phy_interface_t gfar_get_interface(struct net_device *dev)
1403{
1404        struct gfar_private *priv = netdev_priv(dev);
1405        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1406        u32 ecntrl;
1407
1408        ecntrl = gfar_read(&regs->ecntrl);
1409
1410        if (ecntrl & ECNTRL_SGMII_MODE)
1411                return PHY_INTERFACE_MODE_SGMII;
1412
1413        if (ecntrl & ECNTRL_TBI_MODE) {
1414                if (ecntrl & ECNTRL_REDUCED_MODE)
1415                        return PHY_INTERFACE_MODE_RTBI;
1416                else
1417                        return PHY_INTERFACE_MODE_TBI;
1418        }
1419
1420        if (ecntrl & ECNTRL_REDUCED_MODE) {
1421                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1422                        return PHY_INTERFACE_MODE_RMII;
1423                else {
1424                        phy_interface_t interface = priv->interface;
1425
1426                        /*
1427                         * This isn't autodetected right now, so it must
1428                         * be set by the device tree or platform code.
1429                         */
1430                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1431                                return PHY_INTERFACE_MODE_RGMII_ID;
1432
1433                        return PHY_INTERFACE_MODE_RGMII;
1434                }
1435        }
1436
1437        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1438                return PHY_INTERFACE_MODE_GMII;
1439
1440        return PHY_INTERFACE_MODE_MII;
1441}
1442
1443
1444/* Initializes driver's PHY state, and attaches to the PHY.
1445 * Returns 0 on success.
1446 */
1447static int init_phy(struct net_device *dev)
1448{
1449        struct gfar_private *priv = netdev_priv(dev);
1450        uint gigabit_support =
1451                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1452                SUPPORTED_1000baseT_Full : 0;
1453        phy_interface_t interface;
1454
1455        priv->oldlink = 0;
1456        priv->oldspeed = 0;
1457        priv->oldduplex = -1;
1458
1459        interface = gfar_get_interface(dev);
1460
1461        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1462                                      interface);
1463        if (!priv->phydev)
1464                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1465                                                         interface);
1466        if (!priv->phydev) {
1467                dev_err(&dev->dev, "could not attach to PHY\n");
1468                return -ENODEV;
1469        }
1470
1471        if (interface == PHY_INTERFACE_MODE_SGMII)
1472                gfar_configure_serdes(dev);
1473
1474        /* Remove any features not supported by the controller */
1475        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1476        priv->phydev->advertising = priv->phydev->supported;
1477
1478        return 0;
1479}
1480
1481/*
1482 * Initialize TBI PHY interface for communicating with the
1483 * SerDes Lynx PHY on the chip.  We communicate with this PHY
1484 * through the MDIO bus on each controller, treating it as a
1485 * "normal" PHY at the address found in the TBIPA register.  We assume
1486 * that the TBIPA register is valid.  Either the MDIO bus code will set
1487 * it to a value that doesn't conflict with other PHYs on the bus, or the
1488 * value doesn't matter, as there are no other PHYs on the bus.
1489 */
1490static void gfar_configure_serdes(struct net_device *dev)
1491{
1492        struct gfar_private *priv = netdev_priv(dev);
1493        struct phy_device *tbiphy;
1494
1495        if (!priv->tbi_node) {
1496                dev_warn(&dev->dev, "error: SGMII mode requires that the "
1497                                    "device tree specify a tbi-handle\n");
1498                return;
1499        }
1500
1501        tbiphy = of_phy_find_device(priv->tbi_node);
1502        if (!tbiphy) {
1503                dev_err(&dev->dev, "error: Could not get TBI device\n");
1504                return;
1505        }
1506
1507        /*
1508         * If the link is already up, we must already be ok, and don't need to
1509         * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1510         * everything for us?  Resetting it takes the link down and requires
1511         * several seconds for it to come back.
1512         */
1513        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1514                return;
1515
1516        /* Single clock mode, MII mode off (for SerDes communication) */
1517        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1518
1519        phy_write(tbiphy, MII_ADVERTISE,
1520                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1521                        ADVERTISE_1000XPSE_ASYM);
1522
1523        phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
1524                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1525}
1526
1527static void init_registers(struct net_device *dev)
1528{
1529        struct gfar_private *priv = netdev_priv(dev);
1530        struct gfar __iomem *regs = NULL;
1531        int i = 0;
1532
1533        for (i = 0; i < priv->num_grps; i++) {
1534                regs = priv->gfargrp[i].regs;
1535                /* Clear IEVENT */
1536                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1537
1538                /* Initialize IMASK */
1539                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1540        }
1541
1542        regs = priv->gfargrp[0].regs;
1543        /* Init hash registers to zero */
1544        gfar_write(&regs->igaddr0, 0);
1545        gfar_write(&regs->igaddr1, 0);
1546        gfar_write(&regs->igaddr2, 0);
1547        gfar_write(&regs->igaddr3, 0);
1548        gfar_write(&regs->igaddr4, 0);
1549        gfar_write(&regs->igaddr5, 0);
1550        gfar_write(&regs->igaddr6, 0);
1551        gfar_write(&regs->igaddr7, 0);
1552
1553        gfar_write(&regs->gaddr0, 0);
1554        gfar_write(&regs->gaddr1, 0);
1555        gfar_write(&regs->gaddr2, 0);
1556        gfar_write(&regs->gaddr3, 0);
1557        gfar_write(&regs->gaddr4, 0);
1558        gfar_write(&regs->gaddr5, 0);
1559        gfar_write(&regs->gaddr6, 0);
1560        gfar_write(&regs->gaddr7, 0);
1561
1562        /* Zero out the RMON MIB registers if the device has them */
1563        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1564                memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1565
1566                /* Mask off the CAM interrupts */
1567                gfar_write(&regs->rmon.cam1, 0xffffffff);
1568                gfar_write(&regs->rmon.cam2, 0xffffffff);
1569        }
1570
1571        /* Initialize the max receive buffer length */
1572        gfar_write(&regs->mrblr, priv->rx_buffer_size);
1573
1574        /* Initialize the Minimum Frame Length Register */
1575        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1576}
1577
1578static int __gfar_is_rx_idle(struct gfar_private *priv)
1579{
1580        u32 res;
1581
1582        /*
1583         * Normally the TSEC should not hang on a GRS command, so we
1584         * only need to wait for the IEVENT_GRSC flag.
1585         */
1586        if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1587                return 0;
1588
1589        /*
1590         * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1591         * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1592         * and the Rx can be safely reset.
1593         */
1594        res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1595        res &= 0x7f807f80;
1596        if ((res & 0xffff) == (res >> 16))
1597                return 1;
1598
1599        return 0;
1600}
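
/*
 * A worked example of the check above: the mask 0x7f807f80 keeps bits
 * 7-14 of each 16-bit half of the register (0x7f80 per halfword), so
 * comparing the low halfword (res & 0xffff) with the high halfword
 * (res >> 16) compares bits 7-14 against bits 23-30, which is exactly
 * the idle condition described in the comment.
 */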
1601
1602/* Halt the receive and transmit queues */
1603static void gfar_halt_nodisable(struct net_device *dev)
1604{
1605        struct gfar_private *priv = netdev_priv(dev);
1606        struct gfar __iomem *regs = NULL;
1607        u32 tempval;
1608        int i = 0;
1609
1610        for (i = 0; i < priv->num_grps; i++) {
1611                regs = priv->gfargrp[i].regs;
1612                /* Mask all interrupts */
1613                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1614
1615                /* Clear all interrupts */
1616                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1617        }
1618
1619        regs = priv->gfargrp[0].regs;
1620        /* Stop the DMA, and wait for it to stop */
1621        tempval = gfar_read(&regs->dmactrl);
1622        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1623            != (DMACTRL_GRS | DMACTRL_GTS)) {
1624                int ret;
1625
1626                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1627                gfar_write(&regs->dmactrl, tempval);
1628
1629                do {
1630                        ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1631                                 (IEVENT_GRSC | IEVENT_GTSC)) ==
1632                                 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1633                        if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1634                                ret = __gfar_is_rx_idle(priv);
1635                } while (!ret);
1636        }
1637}
1638
1639/* Halt the receive and transmit queues */
1640void gfar_halt(struct net_device *dev)
1641{
1642        struct gfar_private *priv = netdev_priv(dev);
1643        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1644        u32 tempval;
1645
1646        gfar_halt_nodisable(dev);
1647
1648        /* Disable Rx and Tx */
1649        tempval = gfar_read(&regs->maccfg1);
1650        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1651        gfar_write(&regs->maccfg1, tempval);
1652}
1653
1654static void free_grp_irqs(struct gfar_priv_grp *grp)
1655{
1656        free_irq(grp->interruptError, grp);
1657        free_irq(grp->interruptTransmit, grp);
1658        free_irq(grp->interruptReceive, grp);
1659}
1660
1661void stop_gfar(struct net_device *dev)
1662{
1663        struct gfar_private *priv = netdev_priv(dev);
1664        unsigned long flags;
1665        int i;
1666
1667        phy_stop(priv->phydev);
1668
1669
1670        /* Lock it down */
1671        local_irq_save(flags);
1672        lock_tx_qs(priv);
1673        lock_rx_qs(priv);
1674
1675        gfar_halt(dev);
1676
1677        unlock_rx_qs(priv);
1678        unlock_tx_qs(priv);
1679        local_irq_restore(flags);
1680
1681        /* Free the IRQs */
1682        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1683                for (i = 0; i < priv->num_grps; i++)
1684                        free_grp_irqs(&priv->gfargrp[i]);
1685        } else {
1686                for (i = 0; i < priv->num_grps; i++)
1687                        free_irq(priv->gfargrp[i].interruptTransmit,
1688                                        &priv->gfargrp[i]);
1689        }
1690
1691        free_skb_resources(priv);
1692}
1693
1694static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1695{
1696        struct txbd8 *txbdp;
1697        struct gfar_private *priv = netdev_priv(tx_queue->dev);
1698        int i, j;
1699
1700        txbdp = tx_queue->tx_bd_base;
1701
1702        for (i = 0; i < tx_queue->tx_ring_size; i++) {
1703                if (!tx_queue->tx_skbuff[i])
1704                        continue;
1705
1706                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1707                                txbdp->length, DMA_TO_DEVICE);
1708                txbdp->lstatus = 0;
1709                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1710                                j++) {
1711                        txbdp++;
1712                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1713                                        txbdp->length, DMA_TO_DEVICE);
1714                }
1715                txbdp++;
1716                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1717                tx_queue->tx_skbuff[i] = NULL;
1718        }
1719        kfree(tx_queue->tx_skbuff);
1720}
1721
1722static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1723{
1724        struct rxbd8 *rxbdp;
1725        struct gfar_private *priv = netdev_priv(rx_queue->dev);
1726        int i;
1727
1728        rxbdp = rx_queue->rx_bd_base;
1729
1730        for (i = 0; i < rx_queue->rx_ring_size; i++) {
1731                if (rx_queue->rx_skbuff[i]) {
1732                        dma_unmap_single(&priv->ofdev->dev,
1733                                        rxbdp->bufPtr, priv->rx_buffer_size,
1734                                        DMA_FROM_DEVICE);
1735                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1736                        rx_queue->rx_skbuff[i] = NULL;
1737                }
1738                rxbdp->lstatus = 0;
1739                rxbdp->bufPtr = 0;
1740                rxbdp++;
1741        }
1742        kfree(rx_queue->rx_skbuff);
1743}
1744
1745/* If there are any tx skbs or rx skbs still around, free them.
1746 * Then free tx_skbuff and rx_skbuff */
1747static void free_skb_resources(struct gfar_private *priv)
1748{
1749        struct gfar_priv_tx_q *tx_queue = NULL;
1750        struct gfar_priv_rx_q *rx_queue = NULL;
1751        int i;
1752
1753        /* Go through all the buffer descriptors and free their data buffers */
1754        for (i = 0; i < priv->num_tx_queues; i++) {
1755                tx_queue = priv->tx_queue[i];
1756                if (tx_queue->tx_skbuff)
1757                        free_skb_tx_queue(tx_queue);
1758        }
1759
1760        for (i = 0; i < priv->num_rx_queues; i++) {
1761                rx_queue = priv->rx_queue[i];
1762                if (rx_queue->rx_skbuff)
1763                        free_skb_rx_queue(rx_queue);
1764        }
1765
1766        dma_free_coherent(&priv->ofdev->dev,
1767                        sizeof(struct txbd8) * priv->total_tx_ring_size +
1768                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
1769                        priv->tx_queue[0]->tx_bd_base,
1770                        priv->tx_queue[0]->tx_bd_dma_base);
1771        skb_queue_purge(&priv->rx_recycle);
1772}
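
/*
 * A single dma_free_coherent() call suffices above because all TX and
 * RX buffer descriptor rings live in one contiguous DMA block, based at
 * the first TX queue's tx_bd_base and sized for every ring; the
 * matching allocation (gfar_alloc_skb_resources()) is outside this
 * excerpt.
 */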
1773
1774void gfar_start(struct net_device *dev)
1775{
1776        struct gfar_private *priv = netdev_priv(dev);
1777        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1778        u32 tempval;
1779        int i = 0;
1780
1781        /* Enable Rx and Tx in MACCFG1 */
1782        tempval = gfar_read(&regs->maccfg1);
1783        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1784        gfar_write(&regs->maccfg1, tempval);
1785
1786        /* Initialize DMACTRL to have WWR and WOP */
1787        tempval = gfar_read(&regs->dmactrl);
1788        tempval |= DMACTRL_INIT_SETTINGS;
1789        gfar_write(&regs->dmactrl, tempval);
1790
1791        /* Make sure we aren't stopped */
1792        tempval = gfar_read(&regs->dmactrl);
1793        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1794        gfar_write(&regs->dmactrl, tempval);
1795
1796        for (i = 0; i < priv->num_grps; i++) {
1797                regs = priv->gfargrp[i].regs;
1798                /* Clear THLT/RHLT, so that the DMA starts polling now */
1799                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1800                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1801                /* Unmask the interrupts we look for */
1802                gfar_write(&regs->imask, IMASK_DEFAULT);
1803        }
1804
1805        dev->trans_start = jiffies; /* prevent tx timeout */
1806}
1807
1808void gfar_configure_coalescing(struct gfar_private *priv,
1809        unsigned long tx_mask, unsigned long rx_mask)
1810{
1811        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1812        u32 __iomem *baddr;
1813        int i = 0;
1814
1815        /* Backward-compatible case: even if multiple queues are
1816         * enabled, there is only a single register to program
1817         */
1818        gfar_write(&regs->txic, 0);
1819        if (likely(priv->tx_queue[0]->txcoalescing))
1820                gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1821
1822        gfar_write(&regs->rxic, 0);
1823        if (unlikely(priv->rx_queue[0]->rxcoalescing))
1824                gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1825
1826        if (priv->mode == MQ_MG_MODE) {
1827                baddr = &regs->txic0;
1828                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1829                        if (likely(priv->tx_queue[i]->txcoalescing)) {
1830                                gfar_write(baddr + i, 0);
1831                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
1832                        }
1833                }
1834
1835                baddr = &regs->rxic0;
1836                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1837                        if (likely(priv->rx_queue[i]->rxcoalescing)) {
1838                                gfar_write(baddr + i, 0);
1839                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1840                        }
1841                }
1842        }
1843}
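
/*
 * The txic/rxic values written above pack an enable bit, a frame-count
 * threshold and a timer threshold into a single register.  A hedged
 * sketch of how such a value is composed, using the mk_ic_* helper
 * names from gianfar.h in the full source:
 *
 *	u32 txic = IC_ICEN | mk_ic_icft(frame_count) | mk_ic_ictt(timer);
 */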
1844
1845static int register_grp_irqs(struct gfar_priv_grp *grp)
1846{
1847        struct gfar_private *priv = grp->priv;
1848        struct net_device *dev = priv->ndev;
1849        int err;
1850
1851        /* If the device has multiple interrupts, register for
1852         * them.  Otherwise, only register for the one */
1853        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1854                /* Install our interrupt handlers for Error,
1855                 * Transmit, and Receive */
1856                if ((err = request_irq(grp->interruptError, gfar_error, 0,
1857                                grp->int_name_er, grp)) < 0) {
1858                        if (netif_msg_intr(priv))
1859                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
1860                                        dev->name, grp->interruptError);
1861
1862                        goto err_irq_fail;
1863                }
1864
1865                if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1866                                0, grp->int_name_tx, grp)) < 0) {
1867                        if (netif_msg_intr(priv))
1868                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
1869                                        dev->name, grp->interruptTransmit);
1870                        goto tx_irq_fail;
1871                }
1872
1873                if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1874                                grp->int_name_rx, grp)) < 0) {
1875                        if (netif_msg_intr(priv))
1876                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
1877                                        dev->name, grp->interruptReceive);
1878                        goto rx_irq_fail;
1879                }
1880        } else {
1881                if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1882                                grp->int_name_tx, grp)) < 0) {
1883                        if (netif_msg_intr(priv))
1884                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
1885                                        dev->name, grp->interruptTransmit);
1886                        goto err_irq_fail;
1887                }
1888        }
1889
1890        return 0;
1891
1892rx_irq_fail:
1893        free_irq(grp->interruptTransmit, grp);
1894tx_irq_fail:
1895        free_irq(grp->interruptError, grp);
1896err_irq_fail:
1897        return err;
1898
1899}
1900
1901/* Bring the controller up and running */
1902int startup_gfar(struct net_device *ndev)
1903{
1904        struct gfar_private *priv = netdev_priv(ndev);
1905        struct gfar __iomem *regs = NULL;
1906        int err, i, j;
1907
1908        for (i = 0; i < priv->num_grps; i++) {
1909                regs = priv->gfargrp[i].regs;
1910                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1911        }
1912
1913        regs = priv->gfargrp[0].regs;
1914        err = gfar_alloc_skb_resources(ndev);
1915        if (err)
1916                return err;
1917
1918        gfar_init_mac(ndev);
1919
1920        for (i = 0; i < priv->num_grps; i++) {
1921                err = register_grp_irqs(&priv->gfargrp[i]);
1922                if (err) {
1923                        for (j = 0; j < i; j++)
1924                                free_grp_irqs(&priv->gfargrp[j]);
1925                        goto irq_fail;
1926                }
1927        }
1928
1929        /* Start the controller */
1930        gfar_start(ndev);
1931
1932        phy_start(priv->phydev);
1933
1934        gfar_configure_coalescing(priv, 0xFF, 0xFF);
1935
1936        return 0;
1937
1938irq_fail:
1939        free_skb_resources(priv);
1940        return err;
1941}
1942
1943/* Called when something needs to use the ethernet device */
1944/* Returns 0 for success. */
1945static int gfar_enet_open(struct net_device *dev)
1946{
1947        struct gfar_private *priv = netdev_priv(dev);
1948        int err;
1949
1950        enable_napi(priv);
1951
1952        skb_queue_head_init(&priv->rx_recycle);
1953
1954        /* Initialize a bunch of registers */
1955        init_registers(dev);
1956
1957        gfar_set_mac_address(dev);
1958
1959        err = init_phy(dev);
1960
1961        if (err) {
1962                disable_napi(priv);
1963                return err;
1964        }
1965
1966        err = startup_gfar(dev);
1967        if (err) {
1968                disable_napi(priv);
1969                return err;
1970        }
1971
1972        netif_tx_start_all_queues(dev);
1973
1974        device_set_wakeup_enable(&dev->dev, priv->wol_en);
1975
1976        return err;
1977}
1978
1979static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1980{
1981        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1982
1983        memset(fcb, 0, GMAC_FCB_LEN);
1984
1985        return fcb;
1986}
1987
1988static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1989{
1990        u8 flags = 0;
1991
1992        /* If we're here, it's an IP packet with a TCP or UDP
1993         * payload.  We set it up for checksumming, using the
1994         * pseudo-header checksum we provide
1995         */
1996        flags = TXFCB_DEFAULT;
1997
1998        /* Tell the controller what the protocol is */
1999        /* And provide the already calculated phcs */
2000        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2001                flags |= TXFCB_UDP;
2002                fcb->phcs = udp_hdr(skb)->check;
2003        } else
2004                fcb->phcs = tcp_hdr(skb)->check;
2005
2006        /* l3os is the distance between the start of the
2007         * frame (skb->data) and the start of the IP hdr.
2008         * l4os is the distance between the start of the
2009         * l3 hdr and the l4 hdr */
2010        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
2011        fcb->l4os = skb_network_header_len(skb);
2012
2013        fcb->flags = flags;
2014}
2015
2016inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2017{
2018        fcb->flags |= TXFCB_VLN;
2019        fcb->vlctl = vlan_tx_tag_get(skb);
2020}
2021
2022static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2023                               struct txbd8 *base, int ring_size)
2024{
2025        struct txbd8 *new_bd = bdp + stride;
2026
2027        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2028}
2029
2030static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2031                int ring_size)
2032{
2033        return skip_txbd(bdp, 1, base, ring_size);
2034}
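
/*
 * Example of the wrap arithmetic above: with ring_size = 8, stepping
 * stride = 3 from the BD at index 6 yields base + 9, which skip_txbd()
 * folds back to index 1 (9 - 8).  next_txbd() is just the stride-1 case.
 */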
2035
2036/* This is called by the kernel when a frame is ready for transmission. */
2037/* It is pointed to by the dev->hard_start_xmit function pointer */
2038static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2039{
2040        struct gfar_private *priv = netdev_priv(dev);
2041        struct gfar_priv_tx_q *tx_queue = NULL;
2042        struct netdev_queue *txq;
2043        struct gfar __iomem *regs = NULL;
2044        struct txfcb *fcb = NULL;
2045        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2046        u32 lstatus;
2047        int i, rq = 0, do_tstamp = 0;
2048        u32 bufaddr;
2049        unsigned long flags;
2050        unsigned int nr_frags, nr_txbds, length;
2051
2052        /*
2053         * TOE=1 frames larger than 2500 bytes may see excess delays
2054         * before start of transmission.
2055         */
2056        if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2057                        skb->ip_summed == CHECKSUM_PARTIAL &&
2058                        skb->len > 2500)) {
2059                int ret;
2060
2061                ret = skb_checksum_help(skb);
2062                if (ret)
2063                        return ret;
2064        }
2065
2066        rq = skb->queue_mapping;
2067        tx_queue = priv->tx_queue[rq];
2068        txq = netdev_get_tx_queue(dev, rq);
2069        base = tx_queue->tx_bd_base;
2070        regs = tx_queue->grp->regs;
2071
2072        /* check if time stamp should be generated */
2073        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2074                     priv->hwts_tx_en))
2075                do_tstamp = 1;
2076
2077        /* make space for additional header when fcb is needed */
2078        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2079                        vlan_tx_tag_present(skb) ||
2080                        unlikely(do_tstamp)) &&
2081                        (skb_headroom(skb) < GMAC_FCB_LEN)) {
2082                struct sk_buff *skb_new;
2083
2084                skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
2085                if (!skb_new) {
2086                        dev->stats.tx_errors++;
2087                        kfree_skb(skb);
2088                        return NETDEV_TX_OK;
2089                }
2090                kfree_skb(skb);
2091                skb = skb_new;
2092        }
2093
2094        /* total number of fragments in the SKB */
2095        nr_frags = skb_shinfo(skb)->nr_frags;
2096
2097        /* calculate the required number of TxBDs for this skb */
2098        if (unlikely(do_tstamp))
2099                nr_txbds = nr_frags + 2;
2100        else
2101                nr_txbds = nr_frags + 1;
2102
2103        /* check if there is space to queue this packet */
2104        if (nr_txbds > tx_queue->num_txbdfree) {
2105                /* no space, stop the queue */
2106                netif_tx_stop_queue(txq);
2107                dev->stats.tx_fifo_errors++;
2108                return NETDEV_TX_BUSY;
2109        }
2110
2111        /* Update transmit stats */
2112        tx_queue->stats.tx_bytes += skb->len;
2113        tx_queue->stats.tx_packets++;
2114
2115        txbdp = txbdp_start = tx_queue->cur_tx;
2116        lstatus = txbdp->lstatus;
2117
2118        /* Time stamp insertion requires one additional TxBD */
2119        if (unlikely(do_tstamp))
2120                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2121                                tx_queue->tx_ring_size);
2122
2123        if (nr_frags == 0) {
2124                if (unlikely(do_tstamp))
2125                        txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2126                                        TXBD_INTERRUPT);
2127                else
2128                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2129        } else {
2130                /* Place the fragment addresses and lengths into the TxBDs */
2131                for (i = 0; i < nr_frags; i++) {
2132                        /* Point at the next BD, wrapping as needed */
2133                        txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2134
2135                        length = skb_shinfo(skb)->frags[i].size;
2136
2137                        lstatus = txbdp->lstatus | length |
2138                                BD_LFLAG(TXBD_READY);
2139
2140                        /* Handle the last BD specially */
2141                        if (i == nr_frags - 1)
2142                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2143
2144                        bufaddr = dma_map_page(&priv->ofdev->dev,
2145                                        skb_shinfo(skb)->frags[i].page,
2146                                        skb_shinfo(skb)->frags[i].page_offset,
2147                                        length,
2148                                        DMA_TO_DEVICE);
2149
2150                        /* set the TxBD length and buffer pointer */
2151                        txbdp->bufPtr = bufaddr;
2152                        txbdp->lstatus = lstatus;
2153                }
2154
2155                lstatus = txbdp_start->lstatus;
2156        }
2157
2158        /* Set up checksumming */
2159        if (CHECKSUM_PARTIAL == skb->ip_summed) {
2160                fcb = gfar_add_fcb(skb);
2161                /* Errata workaround: checksum in software if the FCB is badly aligned */
2162                if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2163                             && ((unsigned long)fcb % 0x20) > 0x18)) {
2164                        __skb_pull(skb, GMAC_FCB_LEN);
2165                        skb_checksum_help(skb);
2166                } else {
2167                        lstatus |= BD_LFLAG(TXBD_TOE);
2168                        gfar_tx_checksum(skb, fcb);
2169                }
2170        }
2171
2172        if (vlan_tx_tag_present(skb)) {
2173                if (unlikely(NULL == fcb)) {
2174                        fcb = gfar_add_fcb(skb);
2175                        lstatus |= BD_LFLAG(TXBD_TOE);
2176                }
2177
2178                gfar_tx_vlan(skb, fcb);
2179        }
2180
2181        /* Setup tx hardware time stamping if requested */
2182        if (unlikely(do_tstamp)) {
2183                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2184                if (fcb == NULL)
2185                        fcb = gfar_add_fcb(skb);
2186                fcb->ptp = 1;
2187                lstatus |= BD_LFLAG(TXBD_TOE);
2188        }
2189
2190        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2191                        skb_headlen(skb), DMA_TO_DEVICE);
2192
2193        /*
2194         * If time stamping is requested, one additional TxBD must be set up.  The
2195         * first TxBD points to the FCB and must have a data length of
2196         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2197         * the full frame length.
2198         */
2199        if (unlikely(do_tstamp)) {
2200                txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2201                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2202                                (skb_headlen(skb) - GMAC_FCB_LEN);
2203                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2204        } else {
2205                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2206        }
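        /*
         * Resulting descriptor layout for a time-stamped frame, per the
         * comment above:
         *
         *	txbdp_start:  bufPtr -> FCB,        length = GMAC_FCB_LEN
         *	txbdp_tstamp: bufPtr -> frame data, length = headlen - GMAC_FCB_LEN
         *
         * Without time stamping there is a single BD covering the FCB
         * (if any) and the linear data; fragments, if present, were set
         * up earlier.
         */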
2207
2208        /*
2209         * We can work in parallel with gfar_clean_tx_ring(), except
2210         * when modifying num_txbdfree. Note that we didn't grab the lock
2211         * when we were reading the num_txbdfree and checking for available
2212         * space, that's because outside of this function it can only grow,
2213         * and once we've got needed space, it cannot suddenly disappear.
2214         *
2215         * The lock also protects us from gfar_error(), which can modify
2216         * regs->tstat and thus retrigger the transfers, which is why we
2217         * also must grab the lock before setting the ready bit on the
2218         * first BD to be transmitted.
2219         */
2220        spin_lock_irqsave(&tx_queue->txlock, flags);
2221
2222        /*
2223         * The powerpc-specific eieio() is used, as wmb() has too strong
2224         * semantics (it requires synchronization between cacheable and
2225         * uncacheable mappings, which eieio doesn't provide and which we
2226         * don't need), thus requiring a more expensive sync instruction.  At
2227         * some point, the set of architecture-independent barrier functions
2228         * should be expanded to include weaker barriers.
2229         */
2230        eieio();
2231
2232        txbdp_start->lstatus = lstatus;
2233
2234        eieio(); /* force lstatus write before tx_skbuff */
2235
2236        tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2237
2238        /* Update the current skb pointer to the next entry we will use
2239         * (wrapping if necessary) */
2240        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2241                TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2242
2243        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2244
2245        /* reduce TxBD free count */
2246        tx_queue->num_txbdfree -= (nr_txbds);
2247
2248        /* If the next BD still needs to be cleaned up, then the BDs
2249         * are full.  We need to tell the kernel to stop sending us stuff. */
2250        if (!tx_queue->num_txbdfree) {
2251                netif_tx_stop_queue(txq);
2252
2253                dev->stats.tx_fifo_errors++;
2254        }
2255
2256        /* Tell the DMA to go go go */
2257        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2258
2259        /* Unlock priv */
2260        spin_unlock_irqrestore(&tx_queue->txlock, flags);
2261
2262        return NETDEV_TX_OK;
2263}
2264
2265/* Stops the kernel queue, and halts the controller */
2266static int gfar_close(struct net_device *dev)
2267{
2268        struct gfar_private *priv = netdev_priv(dev);
2269
2270        disable_napi(priv);
2271
2272        cancel_work_sync(&priv->reset_task);
2273        stop_gfar(dev);
2274
2275        /* Disconnect from the PHY */
2276        phy_disconnect(priv->phydev);
2277        priv->phydev = NULL;
2278
2279        netif_tx_stop_all_queues(dev);
2280
2281        return 0;
2282}
2283
2284/* Changes the mac address if the controller is not running. */
2285static int gfar_set_mac_address(struct net_device *dev)
2286{
2287        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2288
2289        return 0;
2290}
2291
2292/* Check if rx parser should be activated */
2293void gfar_check_rx_parser_mode(struct gfar_private *priv)
2294{
2295        struct gfar __iomem *regs;
2296        u32 tempval;
2297
2298        regs = priv->gfargrp[0].regs;
2299
2300        tempval = gfar_read(&regs->rctrl);
2301        /* Enable the parser while it's required; disable it otherwise */
2302        if (tempval & RCTRL_REQ_PARSER)
2303                tempval |= RCTRL_PRSDEP_INIT;
2304        else
2305                tempval &= ~RCTRL_PRSDEP_INIT;
2306        gfar_write(&regs->rctrl, tempval);
2307}
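
/*
 * RCTRL_REQ_PARSER gathers the RCTRL feature bits that depend on the
 * parser (VLAN extraction and RX checksum offload among them, per
 * gianfar.h in the full source), while RCTRL_PRSDEP_INIT sets the parse
 * depth; the net effect is that the parser runs only while some enabled
 * feature still needs it.
 */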
2308
2309
2310/* Enables and disables VLAN insertion/extraction */
2311static void gfar_vlan_rx_register(struct net_device *dev,
2312                struct vlan_group *grp)
2313{
2314        struct gfar_private *priv = netdev_priv(dev);
2315        struct gfar __iomem *regs = NULL;
2316        unsigned long flags;
2317        u32 tempval;
2318
2319        regs = priv->gfargrp[0].regs;
2320        local_irq_save(flags);
2321        lock_rx_qs(priv);
2322
2323        priv->vlgrp = grp;
2324
2325        if (grp) {
2326                /* Enable VLAN tag insertion */
2327                tempval = gfar_read(&regs->tctrl);
2328                tempval |= TCTRL_VLINS;
2329
2330                gfar_write(&regs->tctrl, tempval);
2331
2332                /* Enable VLAN tag extraction */
2333                tempval = gfar_read(&regs->rctrl);
2334                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2335                gfar_write(&regs->rctrl, tempval);
2336        } else {
2337                /* Disable VLAN tag insertion */
2338                tempval = gfar_read(&regs->tctrl);
2339                tempval &= ~TCTRL_VLINS;
2340                gfar_write(&regs->tctrl, tempval);
2341
2342                /* Disable VLAN tag extraction */
2343                tempval = gfar_read(&regs->rctrl);
2344                tempval &= ~RCTRL_VLEX;
2345                gfar_write(&regs->rctrl, tempval);
2346
2347                gfar_check_rx_parser_mode(priv);
2348        }
2349
2350        gfar_change_mtu(dev, dev->mtu);
2351
2352        unlock_rx_qs(priv);
2353        local_irq_restore(flags);
2354}
2355
2356static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2357{
2358        int tempsize, tempval;
2359        struct gfar_private *priv = netdev_priv(dev);
2360        struct gfar __iomem *regs = priv->gfargrp[0].regs;
2361        int oldsize = priv->rx_buffer_size;
2362        int frame_size = new_mtu + ETH_HLEN;
2363
2364        if (priv->vlgrp)
2365                frame_size += VLAN_HLEN;
2366
2367        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2368                if (netif_msg_drv(priv))
2369                        printk(KERN_ERR "%s: Invalid MTU setting\n",
2370                                        dev->name);
2371                return -EINVAL;
2372        }
2373
2374        if (gfar_uses_fcb(priv))
2375                frame_size += GMAC_FCB_LEN;
2376
2377        frame_size += priv->padding;
2378
2379        tempsize =
2380            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2381            INCREMENTAL_BUFFER_SIZE;
2382
2383        /* Only stop and start the controller if it isn't already
2384         * stopped, and we changed something */
2385        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2386                stop_gfar(dev);
2387
2388        priv->rx_buffer_size = tempsize;
2389
2390        dev->mtu = new_mtu;
2391
2392        gfar_write(&regs->mrblr, priv->rx_buffer_size);
2393        gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2394
2395        /* If the mtu is larger than the max size for standard
2396         * ethernet frames (i.e., a jumbo frame), then set maccfg2
2397         * to allow huge frames, and to check the length */
2398        tempval = gfar_read(&regs->maccfg2);
2399
2400        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2401                        gfar_has_errata(priv, GFAR_ERRATA_74))
2402                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2403        else
2404                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2405
2406        gfar_write(&regs->maccfg2, tempval);
2407
2408        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2409                startup_gfar(dev);
2410
2411        return 0;
2412}
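
/*
 * Worked example of the sizing above, assuming INCREMENTAL_BUFFER_SIZE
 * is a power of two (512 in gianfar.h): a 1500-byte MTU gives
 * frame_size = 1514, plus 4 for VLAN, plus 8 for the FCB = 1526;
 * masking down to a 512-byte multiple (1024) and adding 512 yields a
 * 1536-byte rx_buffer_size.  Note the formula always adds one
 * increment, so an exact multiple still grows by 512.
 */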
2413
2414/* gfar_reset_task gets scheduled when a packet has not been
2415 * transmitted after a set amount of time.
2416 * For now, assume that clearing out all the structures and
2417 * starting over will fix the problem.
2418 */
2419static void gfar_reset_task(struct work_struct *work)
2420{
2421        struct gfar_private *priv = container_of(work, struct gfar_private,
2422                        reset_task);
2423        struct net_device *dev = priv->ndev;
2424
2425        if (dev->flags & IFF_UP) {
2426                netif_tx_stop_all_queues(dev);
2427                stop_gfar(dev);
2428                startup_gfar(dev);
2429                netif_tx_start_all_queues(dev);
2430        }
2431
2432        netif_tx_schedule_all(dev);
2433}
2434
2435static void gfar_timeout(struct net_device *dev)
2436{
2437        struct gfar_private *priv = netdev_priv(dev);
2438
2439        dev->stats.tx_errors++;
2440        schedule_work(&priv->reset_task);
2441}
2442
2443static void gfar_align_skb(struct sk_buff *skb)
2444{
2445        /* We need the data buffer to be aligned properly.  Reserve as
2446         * many bytes as needed to reach the next RXBUF_ALIGNMENT boundary
2447         */
2448        skb_reserve(skb, RXBUF_ALIGNMENT -
2449                (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2450}
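
/*
 * Example, assuming RXBUF_ALIGNMENT is 64 (its value in gianfar.h): if
 * skb->data lands at an address ending in 0x24, skb_reserve() skips
 * 64 - 0x24 = 28 bytes, pushing the data pointer to the next 64-byte
 * boundary.  This is why gfar_alloc_skb() below over-allocates by
 * RXBUF_ALIGNMENT bytes.
 */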
2451
2452/* Interrupt Handler for Transmit complete */
2453static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2454{
2455        struct net_device *dev = tx_queue->dev;
2456        struct gfar_private *priv = netdev_priv(dev);
2457        struct gfar_priv_rx_q *rx_queue = NULL;
2458        struct txbd8 *bdp, *next = NULL;
2459        struct txbd8 *lbdp = NULL;
2460        struct txbd8 *base = tx_queue->tx_bd_base;
2461        struct sk_buff *skb;
2462        int skb_dirtytx;
2463        int tx_ring_size = tx_queue->tx_ring_size;
2464        int frags = 0, nr_txbds = 0;
2465        int i;
2466        int howmany = 0;
2467        u32 lstatus;
2468        size_t buflen;
2469
2470        rx_queue = priv->rx_queue[tx_queue->qindex];
2471        bdp = tx_queue->dirty_tx;
2472        skb_dirtytx = tx_queue->skb_dirtytx;
2473
2474        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2475                unsigned long flags;
2476
2477                frags = skb_shinfo(skb)->nr_frags;
2478
2479                /*
2480                 * When time stamping, one additional TxBD must be freed.
2481                 * Also, we need to dma_unmap_single() the TxPAL.
2482                 */
2483                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2484                        nr_txbds = frags + 2;
2485                else
2486                        nr_txbds = frags + 1;
2487
2488                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2489
2490                lstatus = lbdp->lstatus;
2491
2492                /* Only clean completed frames */
2493                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2494                                (lstatus & BD_LENGTH_MASK))
2495                        break;
2496
2497                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2498                        next = next_txbd(bdp, base, tx_ring_size);
2499                        buflen = next->length + GMAC_FCB_LEN;
2500                } else
2501                        buflen = bdp->length;
2502
2503                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2504                                buflen, DMA_TO_DEVICE);
2505
2506                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2507                        struct skb_shared_hwtstamps shhwtstamps;
2508                        u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2509                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2510                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2511                        skb_tstamp_tx(skb, &shhwtstamps);
2512                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2513                        bdp = next;
2514                }
2515
2516                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2517                bdp = next_txbd(bdp, base, tx_ring_size);
2518
2519                for (i = 0; i < frags; i++) {
2520                        dma_unmap_page(&priv->ofdev->dev,
2521                                        bdp->bufPtr,
2522                                        bdp->length,
2523                                        DMA_TO_DEVICE);
2524                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2525                        bdp = next_txbd(bdp, base, tx_ring_size);
2526                }
2527
2528                /*
2529                 * If there's room in the recycle pool (limited to rx_ring_size
2530                 * entries), we add this skb back into it, if it's the right size
2531                 */
2532                if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2533                                skb_recycle_check(skb, priv->rx_buffer_size +
2534                                        RXBUF_ALIGNMENT)) {
2535                        gfar_align_skb(skb);
2536                        skb_queue_head(&priv->rx_recycle, skb);
2537                } else
2538                        dev_kfree_skb_any(skb);
2539
2540                tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2541
2542                skb_dirtytx = (skb_dirtytx + 1) &
2543                        TX_RING_MOD_MASK(tx_ring_size);
2544
2545                howmany++;
2546                spin_lock_irqsave(&tx_queue->txlock, flags);
2547                tx_queue->num_txbdfree += nr_txbds;
2548                spin_unlock_irqrestore(&tx_queue->txlock, flags);
2549        }
2550
2551        /* If we freed a buffer, we can restart transmission, if necessary */
2552        if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2553                netif_wake_subqueue(dev, tx_queue->qindex);
2554
2555        /* Update dirty indicators */
2556        tx_queue->skb_dirtytx = skb_dirtytx;
2557        tx_queue->dirty_tx = bdp;
2558
2559        return howmany;
2560}
2561
2562static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2563{
2564        unsigned long flags;
2565
2566        spin_lock_irqsave(&gfargrp->grplock, flags);
2567        if (napi_schedule_prep(&gfargrp->napi)) {
2568                gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2569                __napi_schedule(&gfargrp->napi);
2570        } else {
2571                /*
2572                 * Clear IEVENT, so interrupts aren't called again
2573                 * because of the packets that have already arrived.
2574                 */
2575                gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2576        }
2577        spin_unlock_irqrestore(&gfargrp->grplock, flags);
2578
2579}
2580
2581/* Interrupt Handler for Transmit complete */
2582static irqreturn_t gfar_transmit(int irq, void *grp_id)
2583{
2584        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2585        return IRQ_HANDLED;
2586}
2587
2588static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2589                struct sk_buff *skb)
2590{
2591        struct net_device *dev = rx_queue->dev;
2592        struct gfar_private *priv = netdev_priv(dev);
2593        dma_addr_t buf;
2594
2595        buf = dma_map_single(&priv->ofdev->dev, skb->data,
2596                             priv->rx_buffer_size, DMA_FROM_DEVICE);
2597        gfar_init_rxbdp(rx_queue, bdp, buf);
2598}
2599
2600static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2601{
2602        struct gfar_private *priv = netdev_priv(dev);
2603        struct sk_buff *skb = NULL;
2604
2605        skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2606        if (!skb)
2607                return NULL;
2608
2609        gfar_align_skb(skb);
2610
2611        return skb;
2612}
2613
2614struct sk_buff *gfar_new_skb(struct net_device *dev)
2615{
2616        struct gfar_private *priv = netdev_priv(dev);
2617        struct sk_buff *skb = NULL;
2618
2619        skb = skb_dequeue(&priv->rx_recycle);
2620        if (!skb)
2621                skb = gfar_alloc_skb(dev);
2622
2623        return skb;
2624}
2625
2626static inline void count_errors(unsigned short status, struct net_device *dev)
2627{
2628        struct gfar_private *priv = netdev_priv(dev);
2629        struct net_device_stats *stats = &dev->stats;
2630        struct gfar_extra_stats *estats = &priv->extra_stats;
2631
2632        /* If the packet was truncated, none of the other errors
2633         * matter */
2634        if (status & RXBD_TRUNCATED) {
2635                stats->rx_length_errors++;
2636
2637                estats->rx_trunc++;
2638
2639                return;
2640        }
2641        /* Count the errors, if there were any */
2642        if (status & (RXBD_LARGE | RXBD_SHORT)) {
2643                stats->rx_length_errors++;
2644
2645                if (status & RXBD_LARGE)
2646                        estats->rx_large++;
2647                else
2648                        estats->rx_short++;
2649        }
2650        if (status & RXBD_NONOCTET) {
2651                stats->rx_frame_errors++;
2652                estats->rx_nonoctet++;
2653        }
2654        if (status & RXBD_CRCERR) {
2655                estats->rx_crcerr++;
2656                stats->rx_crc_errors++;
2657        }
2658        if (status & RXBD_OVERRUN) {
2659                estats->rx_overrun++;
2660                stats->rx_crc_errors++;
2661        }
2662}
2663
2664irqreturn_t gfar_receive(int irq, void *grp_id)
2665{
2666        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2667        return IRQ_HANDLED;
2668}
2669
2670static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2671{
2672        /* If valid headers were found and valid checksums were
2673         * verified, tell the kernel that no further checksumming
2674         * is necessary.  Otherwise, leave it to the stack */
2675        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2676                skb->ip_summed = CHECKSUM_UNNECESSARY;
2677        else
2678                skb_checksum_none_assert(skb);
2679}
2680
2681
2682/* gfar_process_frame() -- handle one incoming packet if skb
2683 * isn't NULL.  */
2684static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2685                              int amount_pull)
2686{
2687        struct gfar_private *priv = netdev_priv(dev);
2688        struct rxfcb *fcb = NULL;
2689
2690        int ret;
2691
2692        /* the FCB, if present, is at the beginning of the skb data */
2693        fcb = (struct rxfcb *)skb->data;
2694
2695        /* Remove the FCB from the skb */
2696        /* Remove the padded bytes, if there are any */
2697        if (amount_pull) {
2698                skb_record_rx_queue(skb, fcb->rq);
2699                skb_pull(skb, amount_pull);
2700        }
2701
2702        /* Get receive timestamp from the skb */
2703        if (priv->hwts_rx_en) {
2704                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2705                u64 *ns = (u64 *) skb->data;
2706                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2707                shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2708        }
2709
2710        if (priv->padding)
2711                skb_pull(skb, priv->padding);
2712
2713        if (dev->features & NETIF_F_RXCSUM)
2714                gfar_rx_checksum(skb, fcb);
2715
2716        /* Tell the skb what kind of packet this is */
2717        skb->protocol = eth_type_trans(skb, dev);
2718
2719        /* Send the packet up the stack */
2720        if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2721                ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2722        else
2723                ret = netif_receive_skb(skb);
2724
2725        if (NET_RX_DROP == ret)
2726                priv->extra_stats.kernel_dropped++;
2727
2728        return 0;
2729}
2730
2731/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2732 *   until the budget/quota has been reached. Returns the number
2733 *   of frames handled
2734 */
2735int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2736{
2737        struct net_device *dev = rx_queue->dev;
2738        struct rxbd8 *bdp, *base;
2739        struct sk_buff *skb;
2740        int pkt_len;
2741        int amount_pull;
2742        int howmany = 0;
2743        struct gfar_private *priv = netdev_priv(dev);
2744
2745        /* Get the first full descriptor */
2746        bdp = rx_queue->cur_rx;
2747        base = rx_queue->rx_bd_base;
2748
2749        amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2750
2751        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2752                struct sk_buff *newskb;
2753                rmb();
2754
2755                /* Add another skb for the future */
2756                newskb = gfar_new_skb(dev);
2757
2758                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2759
2760                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2761                                priv->rx_buffer_size, DMA_FROM_DEVICE);
2762
2763                if (unlikely(!(bdp->status & RXBD_ERR) &&
2764                                bdp->length > priv->rx_buffer_size))
2765                        bdp->status = RXBD_LARGE;
2766
2767                /* We drop the frame if we failed to allocate a new buffer */
2768                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2769                                 bdp->status & RXBD_ERR)) {
2770                        count_errors(bdp->status, dev);
2771
2772                        if (unlikely(!newskb))
2773                                newskb = skb;
2774                        else if (skb)
2775                                skb_queue_head(&priv->rx_recycle, skb);
2776                } else {
2777                        /* Increment the number of packets */
2778                        rx_queue->stats.rx_packets++;
2779                        howmany++;
2780
2781                        if (likely(skb)) {
2782                                pkt_len = bdp->length - ETH_FCS_LEN;
2783                                /* Remove the FCS from the packet length */
2784                                skb_put(skb, pkt_len);
2785                                rx_queue->stats.rx_bytes += pkt_len;
2786                                skb_record_rx_queue(skb, rx_queue->qindex);
2787                                gfar_process_frame(dev, skb, amount_pull);
2788
2789                        } else {
2790                                if (netif_msg_rx_err(priv))
2791                                        printk(KERN_WARNING
2792                                               "%s: Missing skb!\n", dev->name);
2793                                rx_queue->stats.rx_dropped++;
2794                                priv->extra_stats.rx_skbmissing++;
2795                        }
2796
2797                }
2798
2799                rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2800
2801                /* Setup the new bdp */
2802                gfar_new_rxbdp(rx_queue, bdp, newskb);
2803
2804                /* Update to the next pointer */
2805                bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2806
2807                /* update to point at the next skb */
2808                rx_queue->skb_currx =
2809                    (rx_queue->skb_currx + 1) &
2810                    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2811        }
2812
2813        /* Update the current rxbd pointer to be the next one */
2814        rx_queue->cur_rx = bdp;
2815
2816        return howmany;
2817}
2818
2819static int gfar_poll(struct napi_struct *napi, int budget)
2820{
2821        struct gfar_priv_grp *gfargrp = container_of(napi,
2822                        struct gfar_priv_grp, napi);
2823        struct gfar_private *priv = gfargrp->priv;
2824        struct gfar __iomem *regs = gfargrp->regs;
2825        struct gfar_priv_tx_q *tx_queue = NULL;
2826        struct gfar_priv_rx_q *rx_queue = NULL;
2827        int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2828        int tx_cleaned = 0, i, left_over_budget = budget;
2829        unsigned long serviced_queues = 0;
2830        int num_queues = 0;
2831
2832        num_queues = gfargrp->num_rx_queues;
2833        budget_per_queue = budget/num_queues;
2834
2835        /* Clear IEVENT, so interrupts aren't called again
2836         * because of the packets that have already arrived */
2837        gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2838
2839        while (num_queues && left_over_budget) {
2840
2841                budget_per_queue = left_over_budget/num_queues;
2842                left_over_budget = 0;
2843
2844                for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2845                        if (test_bit(i, &serviced_queues))
2846                                continue;
2847                        rx_queue = priv->rx_queue[i];
2848                        tx_queue = priv->tx_queue[rx_queue->qindex];
2849
2850                        tx_cleaned += gfar_clean_tx_ring(tx_queue);
2851                        rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2852                                                        budget_per_queue);
2853                        rx_cleaned += rx_cleaned_per_queue;
2854                        if (rx_cleaned_per_queue < budget_per_queue) {
2855                                left_over_budget = left_over_budget +
2856                                        (budget_per_queue - rx_cleaned_per_queue);
2857                                set_bit(i, &serviced_queues);
2858                                num_queues--;
2859                        }
2860                }
2861        }
2862
2863        if (tx_cleaned)
2864                return budget;
2865
2866        if (rx_cleaned < budget) {
2867                napi_complete(napi);
2868
2869                /* Clear the halt bit in RSTAT */
2870                gfar_write(&regs->rstat, gfargrp->rstat);
2871
2872                gfar_write(&regs->imask, IMASK_DEFAULT);
2873
2874                /* If we are coalescing interrupts, update the timer */
2875                /* Otherwise, clear it */
2876                gfar_configure_coalescing(priv,
2877                                gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2878        }
2879
2880        return rx_cleaned;
2881}
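
/*
 * Example of the budget split above: with budget = 64 and two RX queues
 * in the group, each queue is first offered 32 packets.  If queue 0
 * consumes only 20, the 12 left over are redistributed among the
 * still-unserviced queues on the next pass of the while loop, until the
 * budget is exhausted or every queue has been serviced.
 */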
2882
2883#ifdef CONFIG_NET_POLL_CONTROLLER
2884/*
2885 * Polling 'interrupt' - used by things like netconsole to send skbs
2886 * without having to re-enable interrupts. It's not called while
2887 * the interrupt routine is executing.
2888 */
2889static void gfar_netpoll(struct net_device *dev)
2890{
2891        struct gfar_private *priv = netdev_priv(dev);
2892        int i = 0;
2893
2894        /* If the device has multiple interrupts, run tx/rx */
2895        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2896                for (i = 0; i < priv->num_grps; i++) {
2897                        disable_irq(priv->gfargrp[i].interruptTransmit);
2898                        disable_irq(priv->gfargrp[i].interruptReceive);
2899                        disable_irq(priv->gfargrp[i].interruptError);
2900                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2901                                                &priv->gfargrp[i]);
2902                        enable_irq(priv->gfargrp[i].interruptError);
2903                        enable_irq(priv->gfargrp[i].interruptReceive);
2904                        enable_irq(priv->gfargrp[i].interruptTransmit);
2905                }
2906        } else {
2907                for (i = 0; i < priv->num_grps; i++) {
2908                        disable_irq(priv->gfargrp[i].interruptTransmit);
2909                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2910                                                &priv->gfargrp[i]);
2911                        enable_irq(priv->gfargrp[i].interruptTransmit);
2912                }
2913        }
2914}
2915#endif
2916
2917/* The interrupt handler for devices with one interrupt */
2918static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2919{
2920        struct gfar_priv_grp *gfargrp = grp_id;
2921
2922        /* Save ievent for future reference */
2923        u32 events = gfar_read(&gfargrp->regs->ievent);
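        /* The RX/TX bits are acked on the NAPI path (see gfar_poll()
         * above) and the error bits in gfar_error(), so ievent is not
         * cleared here */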
2924
2925        /* Check for reception */
2926        if (events & IEVENT_RX_MASK)
2927                gfar_receive(irq, grp_id);
2928
2929        /* Check for transmit completion */
2930        if (events & IEVENT_TX_MASK)
2931                gfar_transmit(irq, grp_id);
2932
2933        /* Check for errors */
2934        if (events & IEVENT_ERR_MASK)
2935                gfar_error(irq, grp_id);
2936
2937        return IRQ_HANDLED;
2938}
2939
2940/* Called every time the controller might need to be made
2941 * aware of new link state.  The PHY code conveys this
2942 * information through variables in the phydev structure, and this
2943 * function converts those variables into the appropriate
2944 * register values, and can bring down the device if needed.
2945 */
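/* In terms of the register writes below: phydev->duplex drives
 * MACCFG2[FULL_DUPLEX], while phydev->speed selects the MACCFG2
 * interface mode (GMII for gigabit, MII for 10/100) together with
 * ECNTRL[R100], which tells 100 from 10 in the reduced pin modes.
 */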
2946static void adjust_link(struct net_device *dev)
2947{
2948        struct gfar_private *priv = netdev_priv(dev);
2949        struct gfar __iomem *regs = priv->gfargrp[0].regs;
2950        unsigned long flags;
2951        struct phy_device *phydev = priv->phydev;
2952        int new_state = 0;
2953
2954        local_irq_save(flags);
2955        lock_tx_qs(priv);
2956
2957        if (phydev->link) {
2958                u32 tempval = gfar_read(&regs->maccfg2);
2959                u32 ecntrl = gfar_read(&regs->ecntrl);
2960
2961                /* Now we make sure that we can be in full duplex mode.
2962                 * If not, we operate in half-duplex mode. */
2963                if (phydev->duplex != priv->oldduplex) {
2964                        new_state = 1;
2965                        if (!(phydev->duplex))
2966                                tempval &= ~(MACCFG2_FULL_DUPLEX);
2967                        else
2968                                tempval |= MACCFG2_FULL_DUPLEX;
2969
2970                        priv->oldduplex = phydev->duplex;
2971                }
2972
2973                if (phydev->speed != priv->oldspeed) {
2974                        new_state = 1;
2975                        switch (phydev->speed) {
2976                        case SPEED_1000:
2977                                tempval =
2978                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2979
2980                                ecntrl &= ~(ECNTRL_R100);
2981                                break;
2982                        case SPEED_100:
2983                        case SPEED_10:
2984                                tempval =
2985                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2986
2987                                /* Reduced mode distinguishes
2988                                 * between 10 and 100 */
2989                                if (phydev->speed == SPEED_100)
2990                                        ecntrl |= ECNTRL_R100;
2991                                else
2992                                        ecntrl &= ~(ECNTRL_R100);
2993                                break;
2994                        default:
2995                                if (netif_msg_link(priv))
2996                                        printk(KERN_WARNING
2997                                                "%s: unsupported speed %d; expected 10, 100 or 1000\n",
2998                                                dev->name, phydev->speed);
2999                                break;
3000                        }
3001
3002                        priv->oldspeed = phydev->speed;
3003                }
3004
3005                gfar_write(&regs->maccfg2, tempval);
3006                gfar_write(&regs->ecntrl, ecntrl);
3007
3008                if (!priv->oldlink) {
3009                        new_state = 1;
3010                        priv->oldlink = 1;
3011                }
3012        } else if (priv->oldlink) {
3013                new_state = 1;
3014                priv->oldlink = 0;
3015                priv->oldspeed = 0;
3016                priv->oldduplex = -1;
3017        }
3018
3019        if (new_state && netif_msg_link(priv))
3020                phy_print_status(phydev);
3021        unlock_tx_qs(priv);
3022        local_irq_restore(flags);
3023}
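/* Note: adjust_link() is handed to phylib when the PHY is attached
 * (see init_phy() earlier in this file), so it is invoked by the PHY
 * state machine whenever the link parameters change. */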
3024
3025/* Update the hash table based on the current list of multicast
3026 * addresses we subscribe to.  Also, change the promiscuity of
3027 * the device based on the flags (this function is called
3028 * whenever dev->flags is changed) */
3029static void gfar_set_multi(struct net_device *dev)
3030{
3031        struct netdev_hw_addr *ha;
3032        struct gfar_private *priv = netdev_priv(dev);
3033        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3034        u32 tempval;
3035
3036        if (dev->flags & IFF_PROMISC) {
3037                /* Set RCTRL to PROM */
3038                tempval = gfar_read(&regs->rctrl);
3039                tempval |= RCTRL_PROM;
3040                gfar_write(&regs->rctrl, tempval);
3041        } else {
3042                /* Set RCTRL to not PROM */
3043                tempval = gfar_read(&regs->rctrl);
3044                tempval &= ~(RCTRL_PROM);
3045                gfar_write(&regs->rctrl, tempval);
3046        }
3047
3048        if (dev->flags & IFF_ALLMULTI) {
3049                /* Set the hash to rx all multicast frames */
3050                gfar_write(&regs->igaddr0, 0xffffffff);
3051                gfar_write(&regs->igaddr1, 0xffffffff);
3052                gfar_write(&regs->igaddr2, 0xffffffff);
3053                gfar_write(&regs->igaddr3, 0xffffffff);
3054                gfar_write(&regs->igaddr4, 0xffffffff);
3055                gfar_write(&regs->igaddr5, 0xffffffff);
3056                gfar_write(&regs->igaddr6, 0xffffffff);
3057                gfar_write(&regs->igaddr7, 0xffffffff);
3058                gfar_write(&regs->gaddr0, 0xffffffff);
3059                gfar_write(&regs->gaddr1, 0xffffffff);
3060                gfar_write(&regs->gaddr2, 0xffffffff);
3061                gfar_write(&regs->gaddr3, 0xffffffff);
3062                gfar_write(&regs->gaddr4, 0xffffffff);
3063                gfar_write(&regs->gaddr5, 0xffffffff);
3064                gfar_write(&regs->gaddr6, 0xffffffff);
3065                gfar_write(&regs->gaddr7, 0xffffffff);
3066        } else {
3067                int em_num;
3068                int idx;
3069
3070                /* zero out the hash */
3071                gfar_write(&regs->igaddr0, 0x0);
3072                gfar_write(&regs->igaddr1, 0x0);
3073                gfar_write(&regs->igaddr2, 0x0);
3074                gfar_write(&regs->igaddr3, 0x0);
3075                gfar_write(&regs->igaddr4, 0x0);
3076                gfar_write(&regs->igaddr5, 0x0);
3077                gfar_write(&regs->igaddr6, 0x0);
3078                gfar_write(&regs->igaddr7, 0x0);
3079                gfar_write(&regs->gaddr0, 0x0);
3080                gfar_write(&regs->gaddr1, 0x0);
3081                gfar_write(&regs->gaddr2, 0x0);
3082                gfar_write(&regs->gaddr3, 0x0);
3083                gfar_write(&regs->gaddr4, 0x0);
3084                gfar_write(&regs->gaddr5, 0x0);
3085                gfar_write(&regs->gaddr6, 0x0);
3086                gfar_write(&regs->gaddr7, 0x0);
3087
3088                /* If we have extended hash tables, we need to
3089                 * clear the exact match registers to prepare for
3090                 * setting them */
3091                if (priv->extended_hash) {
3092                        em_num = GFAR_EM_NUM + 1;
3093                        gfar_clear_exact_match(dev);
3094                        idx = 1;
3095                } else {
3096                        idx = 0;
3097                        em_num = 0;
3098                }
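                /* Exact-match entry 0 holds the station address itself,
                 * which is why multicast exact-match entries start at
                 * idx = 1 */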
3099
3100                if (netdev_mc_empty(dev))
3101                        return;
3102
3103                /* Parse the list, and set the appropriate bits */
3104                netdev_for_each_mc_addr(ha, dev) {
3105                        if (idx < em_num) {
3106                                gfar_set_mac_for_addr(dev, idx, ha->addr);
3107                                idx++;
3108                        } else
3109                                gfar_set_hash_for_addr(dev, ha->addr);
3110                }
3111        }
3112}
3113
3114
3115/* Clears each of the exact match registers to zero, so they
3116 * don't interfere with normal reception */
3117static void gfar_clear_exact_match(struct net_device *dev)
3118{
3119        int idx;
3120        static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
3121
3122        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3123                gfar_set_mac_for_addr(dev, idx, zero_arr);
3124}
3125
3126/* Set the appropriate hash bit for the given addr */
3127/* The algorithm works like so:
3128 * 1) Take the Destination Address (ie the multicast address), and
3129 * do a CRC on it (little endian), and reverse the bits of the
3130 * result.
3131 * 2) Use the 8 most significant bits as a hash into a 256-entry
3132 * table.  The table is controlled through 8 32-bit registers:
3133 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3134 * entry 255.  This means that the 3 most significant bits of the
3135 * hash index select which gaddr register to use, and the 5 other bits
3136 * indicate which bit (assuming an IBM numbering scheme, which
3137 * for PowerPC (tm) is usually the case) in the register holds
3138 * the entry. */
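/* Worked example (illustrative, assuming a hash_width of 8): if the
 * bit-reversed CRC comes out as 0xa5000000, its top byte is
 * 0xa5 = 0b10100101.  The top three of those bits (0b101 = 5) select
 * gaddr5, and the remaining five (0b00101 = 5) pick bit 5 in IBM
 * numbering, so the value ORed in is 1 << (31 - 5). */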
3139static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3140{
3141        u32 tempval;
3142        struct gfar_private *priv = netdev_priv(dev);
3143        u32 result = ether_crc(MAC_ADDR_LEN, addr);
3144        int width = priv->hash_width;
3145        u8 whichbit = (result >> (32 - width)) & 0x1f;
3146        u8 whichreg = result >> (32 - width + 5);
3147        u32 value = (1 << (31-whichbit));
3148
3149        tempval = gfar_read(priv->hash_regs[whichreg]);
3150        tempval |= value;
3151        gfar_write(priv->hash_regs[whichreg], tempval);
3152}
3153
3154
3155/* There are multiple MAC Address register pairs on some controllers
3156 * This function sets the num'th pair to a given address
3157 */
3158static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3159                                  const u8 *addr)
3160{
3161        struct gfar_private *priv = netdev_priv(dev);
3162        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3163        int idx;
3164        char tmpbuf[8] = { 0 };  /* padded so both u32 reads below stay in bounds */
3165        u32 tempval;
3166        u32 __iomem *macptr = &regs->macstnaddr1;
3167
3168        macptr += num * 2;
3169
3170        /* Copy the address into the MAC registers in reverse byte
3171         * order, as the controller expects it that way */
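        /* For example (hypothetical address 00:04:9f:01:02:03, on a
         * big-endian core): tmpbuf becomes 03 02 01 9f 04 00 plus the
         * zero padding, so macstnaddr1 is written with 0x0302019f and
         * the top half of macstnaddr2 with 0x0400 */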
3172        for (idx = 0; idx < MAC_ADDR_LEN; idx++)
3173                tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
3174
3175        gfar_write(macptr, *((u32 *) (tmpbuf)));
3176
3177        tempval = *((u32 *) (tmpbuf + 4));
3178
3179        gfar_write(macptr+1, tempval);
3180}
3181
3182/* GFAR error interrupt handler */
3183static irqreturn_t gfar_error(int irq, void *grp_id)
3184{
3185        struct gfar_priv_grp *gfargrp = grp_id;
3186        struct gfar __iomem *regs = gfargrp->regs;
3187        struct gfar_private *priv = gfargrp->priv;
3188        struct net_device *dev = priv->ndev;
3189
3190        /* Save ievent for future reference */
3191        u32 events = gfar_read(&regs->ievent);
3192
3193        /* Clear IEVENT */
3194        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3195
3196        /* Magic Packet is not an error. */
3197        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3198            (events & IEVENT_MAG))
3199                events &= ~IEVENT_MAG;
3200
3201        /* Log the error, if the message level allows it */
3202        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3203                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
3204                       dev->name, events, gfar_read(&regs->imask));
3205
3206        /* Update the error counters */
3207        if (events & IEVENT_TXE) {
3208                dev->stats.tx_errors++;
3209
3210                if (events & IEVENT_LC)
3211                        dev->stats.tx_window_errors++;
3212                if (events & IEVENT_CRL)
3213                        dev->stats.tx_aborted_errors++;
3214                if (events & IEVENT_XFUN) {
3215                        unsigned long flags;
3216
3217                        if (netif_msg_tx_err(priv))
3218                                printk(KERN_DEBUG "%s: TX FIFO underrun, packet dropped.\n",
3219                                       dev->name);
3220                        dev->stats.tx_dropped++;
3221                        priv->extra_stats.tx_underrun++;
3222
3223                        local_irq_save(flags);
3224                        lock_tx_qs(priv);
3225
3226                        /* Reactivate the Tx Queues */
3227                        gfar_write(&regs->tstat, gfargrp->tstat);
3228
3229                        unlock_tx_qs(priv);
3230                        local_irq_restore(flags);
3231                }
3232                if (netif_msg_tx_err(priv))
3233                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
3234        }
3235        if (events & IEVENT_BSY) {
3236                dev->stats.rx_errors++;
3237                priv->extra_stats.rx_bsy++;
3238
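                /* BSY means a frame was discarded for lack of RX
                 * buffers; kick the receive path so the ring is cleaned
                 * and reception can resume */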
3239                gfar_receive(irq, grp_id);
3240
3241                if (netif_msg_rx_err(priv))
3242                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
3243                               dev->name, gfar_read(&regs->rstat));
3244        }
3245        if (events & IEVENT_BABR) {
3246                dev->stats.rx_errors++;
3247                priv->extra_stats.rx_babr++;
3248
3249                if (netif_msg_rx_err(priv))
3250                        printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
3251        }
3252        if (events & IEVENT_EBERR) {
3253                priv->extra_stats.eberr++;
3254                if (netif_msg_rx_err(priv))
3255                        printk(KERN_DEBUG "%s: bus error\n", dev->name);
3256        }
3257        if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
3258                printk(KERN_DEBUG "%s: control frame\n", dev->name);
3259
3260        if (events & IEVENT_BABT) {
3261                priv->extra_stats.tx_babt++;
3262                if (netif_msg_tx_err(priv))
3263                        printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
3264        }
3265        return IRQ_HANDLED;
3266}
3267
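/* Match both the legacy "gianfar" device-tree nodes and the
 * multi-group eTSEC 2.0 nodes ("fsl,etsec2") */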
3268static const struct of_device_id gfar_match[] =
3269{
3270        {
3271                .type = "network",
3272                .compatible = "gianfar",
3273        },
3274        {
3275                .compatible = "fsl,etsec2",
3276        },
3277        {},
3278};
3279MODULE_DEVICE_TABLE(of, gfar_match);
3280
3281/* Structure for a device driver */
3282static struct platform_driver gfar_driver = {
3283        .driver = {
3284                .name = "fsl-gianfar",
3285                .owner = THIS_MODULE,
3286                .pm = GFAR_PM_OPS,
3287                .of_match_table = gfar_match,
3288        },
3289        .probe = gfar_probe,
3290        .remove = gfar_remove,
3291};
3292
3293static int __init gfar_init(void)
3294{
3295        return platform_driver_register(&gfar_driver);
3296}
3297
3298static void __exit gfar_exit(void)
3299{
3300        platform_driver_unregister(&gfar_driver);
3301}
3302
3303module_init(gfar_init);
3304module_exit(gfar_exit);
3305
3306