   1/* drivers/net/ethernet/freescale/gianfar.c
   2 *
   3 * Gianfar Ethernet Driver
   4 * This driver is designed for the non-CPM ethernet controllers
   5 * on the 85xx and 83xx family of integrated processors
   6 * Based on 8260_io/fcc_enet.c
   7 *
   8 * Author: Andy Fleming
   9 * Maintainer: Kumar Gala
  10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  11 *
  12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
  13 * Copyright 2007 MontaVista Software, Inc.
  14 *
  15 * This program is free software; you can redistribute  it and/or modify it
  16 * under  the terms of  the GNU General  Public License as published by the
  17 * Free Software Foundation;  either version 2 of the  License, or (at your
  18 * option) any later version.
  19 *
  20 *  Gianfar:  AKA Lambda Draconis, "Dragon"
  21 *  RA 11 31 24.2
  22 *  Dec +69 19 52
  23 *  V 3.84
  24 *  B-V +1.62
  25 *
  26 *  Theory of operation
  27 *
  28 *  The driver is initialized through of_device. Configuration information
  29 *  is therefore conveyed through an OF-style device tree.
  30 *
  31 *  The Gianfar Ethernet Controller uses a ring of buffer
  32 *  descriptors.  The beginning is indicated by a register
  33 *  pointing to the physical address of the start of the ring.
  34 *  The end is determined by a "wrap" bit being set in the
  35 *  last descriptor of the ring.
  36 *
  37 *  When a packet is received, the RXF bit in the
  38 *  IEVENT register is set, triggering an interrupt when the
  39 *  corresponding bit in the IMASK register is also set (if
  40 *  interrupt coalescing is active, then the interrupt may not
  41 *  happen immediately, but will wait until either a set number
  42 *  of frames or amount of time have passed).  In NAPI, the
  43 *  interrupt handler will signal there is work to be done, and
  44 *  exit. This method will start at the last known empty
  45 *  descriptor, and process every subsequent descriptor until there
  46 *  are none left with data (NAPI will stop after a set number of
  47 *  packets to give time to other tasks, but will eventually
  48 *  process all the packets).  The data arrives inside a
  49 *  pre-allocated skb, and so after the skb is passed up to the
  50 *  stack, a new skb must be allocated, and the address field in
  51 *  the buffer descriptor must be updated to indicate this new
  52 *  skb.
  53 *
  54 *  When the kernel requests that a packet be transmitted, the
  55 *  driver starts where it left off last time, and points the
  56 *  descriptor at the buffer which was passed in.  The driver
  57 *  then informs the DMA engine that there are packets ready to
  58 *  be transmitted.  Once the controller is finished transmitting
  59 *  the packet, an interrupt may be triggered (under the same
  60 *  conditions as for reception, but depending on the TXF bit).
  61 *  The driver then cleans up the buffer.
  62 */
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65#define DEBUG
  66
  67#include <linux/kernel.h>
  68#include <linux/string.h>
  69#include <linux/errno.h>
  70#include <linux/unistd.h>
  71#include <linux/slab.h>
  72#include <linux/interrupt.h>
  73#include <linux/init.h>
  74#include <linux/delay.h>
  75#include <linux/netdevice.h>
  76#include <linux/etherdevice.h>
  77#include <linux/skbuff.h>
  78#include <linux/if_vlan.h>
  79#include <linux/spinlock.h>
  80#include <linux/mm.h>
  81#include <linux/of_mdio.h>
  82#include <linux/of_platform.h>
  83#include <linux/ip.h>
  84#include <linux/tcp.h>
  85#include <linux/udp.h>
  86#include <linux/in.h>
  87#include <linux/net_tstamp.h>
  88
  89#include <asm/io.h>
  90#include <asm/reg.h>
  91#include <asm/irq.h>
  92#include <asm/uaccess.h>
  93#include <linux/module.h>
  94#include <linux/dma-mapping.h>
  95#include <linux/crc32.h>
  96#include <linux/mii.h>
  97#include <linux/phy.h>
  98#include <linux/phy_fixed.h>
  99#include <linux/of.h>
 100#include <linux/of_net.h>
 101
 102#include "gianfar.h"
 103#include "fsl_pq_mdio.h"
 104
 105#define TX_TIMEOUT      (1*HZ)
 106
 107const char gfar_driver_version[] = "1.3";
 108
 109static int gfar_enet_open(struct net_device *dev);
 110static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 111static void gfar_reset_task(struct work_struct *work);
 112static void gfar_timeout(struct net_device *dev);
 113static int gfar_close(struct net_device *dev);
 114struct sk_buff *gfar_new_skb(struct net_device *dev);
 115static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 116                           struct sk_buff *skb);
 117static int gfar_set_mac_address(struct net_device *dev);
 118static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 119static irqreturn_t gfar_error(int irq, void *dev_id);
 120static irqreturn_t gfar_transmit(int irq, void *dev_id);
 121static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 122static void adjust_link(struct net_device *dev);
 123static void init_registers(struct net_device *dev);
 124static int init_phy(struct net_device *dev);
 125static int gfar_probe(struct platform_device *ofdev);
 126static int gfar_remove(struct platform_device *ofdev);
 127static void free_skb_resources(struct gfar_private *priv);
 128static void gfar_set_multi(struct net_device *dev);
 129static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 130static void gfar_configure_serdes(struct net_device *dev);
 131static int gfar_poll(struct napi_struct *napi, int budget);
 132#ifdef CONFIG_NET_POLL_CONTROLLER
 133static void gfar_netpoll(struct net_device *dev);
 134#endif
 135int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 136static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 137static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 138                              int amount_pull, struct napi_struct *napi);
 139void gfar_halt(struct net_device *dev);
 140static void gfar_halt_nodisable(struct net_device *dev);
 141void gfar_start(struct net_device *dev);
 142static void gfar_clear_exact_match(struct net_device *dev);
 143static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 144                                  const u8 *addr);
 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 146
 147MODULE_AUTHOR("Freescale Semiconductor, Inc");
 148MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 149MODULE_LICENSE("GPL");
 150
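     /* Initialize a single Rx buffer descriptor: point it at the given DMA
      * buffer, mark it empty with interrupts enabled, and set the WRAP flag
      * if it is the last descriptor in the ring.
      */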
 151static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 152                            dma_addr_t buf)
 153{
 154        u32 lstatus;
 155
 156        bdp->bufPtr = buf;
 157
 158        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 159        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 160                lstatus |= BD_LFLAG(RXBD_WRAP);
 161
 162        eieio();
 163
 164        bdp->lstatus = lstatus;
 165}
 166
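     /* Bring the Tx and Rx buffer descriptor rings to their initial state:
      * reset the Tx ring bookkeeping and clear its descriptors, and
      * (re)initialize each Rx descriptor, allocating a fresh skb where none
      * is attached yet.
      */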
 167static int gfar_init_bds(struct net_device *ndev)
 168{
 169        struct gfar_private *priv = netdev_priv(ndev);
 170        struct gfar_priv_tx_q *tx_queue = NULL;
 171        struct gfar_priv_rx_q *rx_queue = NULL;
 172        struct txbd8 *txbdp;
 173        struct rxbd8 *rxbdp;
 174        int i, j;
 175
 176        for (i = 0; i < priv->num_tx_queues; i++) {
 177                tx_queue = priv->tx_queue[i];
 178                /* Initialize some variables in our dev structure */
 179                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 180                tx_queue->dirty_tx = tx_queue->tx_bd_base;
 181                tx_queue->cur_tx = tx_queue->tx_bd_base;
 182                tx_queue->skb_curtx = 0;
 183                tx_queue->skb_dirtytx = 0;
 184
 185                /* Initialize Transmit Descriptor Ring */
 186                txbdp = tx_queue->tx_bd_base;
 187                for (j = 0; j < tx_queue->tx_ring_size; j++) {
 188                        txbdp->lstatus = 0;
 189                        txbdp->bufPtr = 0;
 190                        txbdp++;
 191                }
 192
 193                /* Set the last descriptor in the ring to indicate wrap */
 194                txbdp--;
 195                txbdp->status |= TXBD_WRAP;
 196        }
 197
 198        for (i = 0; i < priv->num_rx_queues; i++) {
 199                rx_queue = priv->rx_queue[i];
 200                rx_queue->cur_rx = rx_queue->rx_bd_base;
 201                rx_queue->skb_currx = 0;
 202                rxbdp = rx_queue->rx_bd_base;
 203
 204                for (j = 0; j < rx_queue->rx_ring_size; j++) {
 205                        struct sk_buff *skb = rx_queue->rx_skbuff[j];
 206
 207                        if (skb) {
 208                                gfar_init_rxbdp(rx_queue, rxbdp,
 209                                                rxbdp->bufPtr);
 210                        } else {
 211                                skb = gfar_new_skb(ndev);
 212                                if (!skb) {
 213                                        netdev_err(ndev, "Can't allocate RX buffers\n");
 214                                        goto err_rxalloc_fail;
 215                                }
 216                                rx_queue->rx_skbuff[j] = skb;
 217
 218                                gfar_new_rxbdp(rx_queue, rxbdp, skb);
 219                        }
 220
 221                        rxbdp++;
 222                }
 223
 224        }
 225
 226        return 0;
 227
 228err_rxalloc_fail:
 229        free_skb_resources(priv);
 230        return -ENOMEM;
 231}
 232
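     /* Allocate one DMA-coherent region holding all Tx and Rx descriptor
      * rings plus the per-queue skb pointer arrays, then hand off to
      * gfar_init_bds() to initialize the descriptors.
      */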
 233static int gfar_alloc_skb_resources(struct net_device *ndev)
 234{
 235        void *vaddr;
 236        dma_addr_t addr;
 237        int i, j, k;
 238        struct gfar_private *priv = netdev_priv(ndev);
 239        struct device *dev = &priv->ofdev->dev;
 240        struct gfar_priv_tx_q *tx_queue = NULL;
 241        struct gfar_priv_rx_q *rx_queue = NULL;
 242
 243        priv->total_tx_ring_size = 0;
 244        for (i = 0; i < priv->num_tx_queues; i++)
 245                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
 246
 247        priv->total_rx_ring_size = 0;
 248        for (i = 0; i < priv->num_rx_queues; i++)
 249                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
 250
 251        /* Allocate memory for the buffer descriptors */
 252        vaddr = dma_alloc_coherent(dev,
 253                        sizeof(struct txbd8) * priv->total_tx_ring_size +
 254                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
 255                        &addr, GFP_KERNEL);
 256        if (!vaddr) {
 257                netif_err(priv, ifup, ndev,
 258                          "Could not allocate buffer descriptors!\n");
 259                return -ENOMEM;
 260        }
 261
 262        for (i = 0; i < priv->num_tx_queues; i++) {
 263                tx_queue = priv->tx_queue[i];
 264                tx_queue->tx_bd_base = vaddr;
 265                tx_queue->tx_bd_dma_base = addr;
 266                tx_queue->dev = ndev;
 267                /* enet DMA only understands physical addresses */
 268                addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 269                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 270        }
 271
 272        /* Start the rx descriptor ring where the tx ring leaves off */
 273        for (i = 0; i < priv->num_rx_queues; i++) {
 274                rx_queue = priv->rx_queue[i];
 275                rx_queue->rx_bd_base = vaddr;
 276                rx_queue->rx_bd_dma_base = addr;
 277                rx_queue->dev = ndev;
 278                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 279                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 280        }
 281
 282        /* Setup the skbuff rings */
 283        for (i = 0; i < priv->num_tx_queues; i++) {
 284                tx_queue = priv->tx_queue[i];
 285                tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
 286                                              tx_queue->tx_ring_size,
 287                                              GFP_KERNEL);
 288                if (!tx_queue->tx_skbuff) {
 289                        netif_err(priv, ifup, ndev,
 290                                  "Could not allocate tx_skbuff\n");
 291                        goto cleanup;
 292                }
 293
 294                for (k = 0; k < tx_queue->tx_ring_size; k++)
 295                        tx_queue->tx_skbuff[k] = NULL;
 296        }
 297
 298        for (i = 0; i < priv->num_rx_queues; i++) {
 299                rx_queue = priv->rx_queue[i];
 300                rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
 301                                              rx_queue->rx_ring_size,
 302                                              GFP_KERNEL);
 303
 304                if (!rx_queue->rx_skbuff) {
 305                        netif_err(priv, ifup, ndev,
 306                                  "Could not allocate rx_skbuff\n");
 307                        goto cleanup;
 308                }
 309
 310                for (j = 0; j < rx_queue->rx_ring_size; j++)
 311                        rx_queue->rx_skbuff[j] = NULL;
 312        }
 313
 314        if (gfar_init_bds(ndev))
 315                goto cleanup;
 316
 317        return 0;
 318
 319cleanup:
 320        free_skb_resources(priv);
 321        return -ENOMEM;
 322}
 323
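     /* Program the TBASEn/RBASEn registers with the DMA base address of each
      * Tx/Rx queue's descriptor ring.
      */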
 324static void gfar_init_tx_rx_base(struct gfar_private *priv)
 325{
 326        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 327        u32 __iomem *baddr;
 328        int i;
 329
 330        baddr = &regs->tbase0;
 331        for (i = 0; i < priv->num_tx_queues; i++) {
 332                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 333                baddr += 2;
 334        }
 335
 336        baddr = &regs->rbase0;
 337        for (i = 0; i < priv->num_rx_queues; i++) {
 338                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 339                baddr += 2;
 340        }
 341}
 342
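     /* Program the MAC-level receive and transmit controls (RCTRL/TCTRL),
      * coalescing, stashing attributes and FIFO thresholds according to the
      * device flags and current feature settings.
      */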
 343static void gfar_init_mac(struct net_device *ndev)
 344{
 345        struct gfar_private *priv = netdev_priv(ndev);
 346        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 347        u32 rctrl = 0;
 348        u32 tctrl = 0;
 349        u32 attrs = 0;
 350
 351        /* write the tx/rx base registers */
 352        gfar_init_tx_rx_base(priv);
 353
 354        /* Configure the coalescing support */
 355        gfar_configure_coalescing(priv, 0xFF, 0xFF);
 356
 357        if (priv->rx_filer_enable) {
 358                rctrl |= RCTRL_FILREN;
 359                /* Program the RIR0 reg with the required distribution */
 360                gfar_write(&regs->rir0, DEFAULT_RIR0);
 361        }
 362
 363        if (ndev->features & NETIF_F_RXCSUM)
 364                rctrl |= RCTRL_CHECKSUMMING;
 365
 366        if (priv->extended_hash) {
 367                rctrl |= RCTRL_EXTHASH;
 368
 369                gfar_clear_exact_match(ndev);
 370                rctrl |= RCTRL_EMEN;
 371        }
 372
 373        if (priv->padding) {
 374                rctrl &= ~RCTRL_PAL_MASK;
 375                rctrl |= RCTRL_PADDING(priv->padding);
 376        }
 377
 378        /* Insert receive time stamps into padding alignment bytes */
 379        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
 380                rctrl &= ~RCTRL_PAL_MASK;
 381                rctrl |= RCTRL_PADDING(8);
 382                priv->padding = 8;
 383        }
 384
 385        /* Enable HW time stamping if requested from user space */
 386        if (priv->hwts_rx_en)
 387                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
 388
 389        if (ndev->features & NETIF_F_HW_VLAN_RX)
 390                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
 391
 392        /* Init rctrl based on our settings */
 393        gfar_write(&regs->rctrl, rctrl);
 394
 395        if (ndev->features & NETIF_F_IP_CSUM)
 396                tctrl |= TCTRL_INIT_CSUM;
 397
 398        tctrl |= TCTRL_TXSCHED_PRIO;
 399
 400        gfar_write(&regs->tctrl, tctrl);
 401
 402        /* Set the extraction length and index */
 403        attrs = ATTRELI_EL(priv->rx_stash_size) |
 404                ATTRELI_EI(priv->rx_stash_index);
 405
 406        gfar_write(&regs->attreli, attrs);
 407
 408        /* Start with defaults, and add stashing or locking
  409         * depending on the appropriate variables
 410         */
 411        attrs = ATTR_INIT_SETTINGS;
 412
 413        if (priv->bd_stash_en)
 414                attrs |= ATTR_BDSTASH;
 415
 416        if (priv->rx_stash_size != 0)
 417                attrs |= ATTR_BUFSTASH;
 418
 419        gfar_write(&regs->attr, attrs);
 420
 421        gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
 422        gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
 423        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
 424}
 425
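     /* Aggregate the per-queue software counters into dev->stats for
      * ndo_get_stats.
      */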
 426static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 427{
 428        struct gfar_private *priv = netdev_priv(dev);
 429        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 430        unsigned long tx_packets = 0, tx_bytes = 0;
 431        int i;
 432
 433        for (i = 0; i < priv->num_rx_queues; i++) {
 434                rx_packets += priv->rx_queue[i]->stats.rx_packets;
 435                rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
 436                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
 437        }
 438
 439        dev->stats.rx_packets = rx_packets;
 440        dev->stats.rx_bytes   = rx_bytes;
 441        dev->stats.rx_dropped = rx_dropped;
 442
 443        for (i = 0; i < priv->num_tx_queues; i++) {
 444                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
 445                tx_packets += priv->tx_queue[i]->stats.tx_packets;
 446        }
 447
 448        dev->stats.tx_bytes   = tx_bytes;
 449        dev->stats.tx_packets = tx_packets;
 450
 451        return &dev->stats;
 452}
 453
 454static const struct net_device_ops gfar_netdev_ops = {
 455        .ndo_open = gfar_enet_open,
 456        .ndo_start_xmit = gfar_start_xmit,
 457        .ndo_stop = gfar_close,
 458        .ndo_change_mtu = gfar_change_mtu,
 459        .ndo_set_features = gfar_set_features,
 460        .ndo_set_rx_mode = gfar_set_multi,
 461        .ndo_tx_timeout = gfar_timeout,
 462        .ndo_do_ioctl = gfar_ioctl,
 463        .ndo_get_stats = gfar_get_stats,
 464        .ndo_set_mac_address = eth_mac_addr,
 465        .ndo_validate_addr = eth_validate_addr,
 466#ifdef CONFIG_NET_POLL_CONTROLLER
 467        .ndo_poll_controller = gfar_netpoll,
 468#endif
 469};
 470
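     /* Helpers that take or release the per-queue Rx/Tx spinlocks for every
      * queue.
      */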
 471void lock_rx_qs(struct gfar_private *priv)
 472{
 473        int i;
 474
 475        for (i = 0; i < priv->num_rx_queues; i++)
 476                spin_lock(&priv->rx_queue[i]->rxlock);
 477}
 478
 479void lock_tx_qs(struct gfar_private *priv)
 480{
 481        int i;
 482
 483        for (i = 0; i < priv->num_tx_queues; i++)
 484                spin_lock(&priv->tx_queue[i]->txlock);
 485}
 486
 487void unlock_rx_qs(struct gfar_private *priv)
 488{
 489        int i;
 490
 491        for (i = 0; i < priv->num_rx_queues; i++)
 492                spin_unlock(&priv->rx_queue[i]->rxlock);
 493}
 494
 495void unlock_tx_qs(struct gfar_private *priv)
 496{
 497        int i;
 498
 499        for (i = 0; i < priv->num_tx_queues; i++)
 500                spin_unlock(&priv->tx_queue[i]->txlock);
 501}
 502
 503static bool gfar_is_vlan_on(struct gfar_private *priv)
 504{
 505        return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
 506               (priv->ndev->features & NETIF_F_HW_VLAN_TX);
 507}
 508
 509/* Returns 1 if incoming frames use an FCB */
 510static inline int gfar_uses_fcb(struct gfar_private *priv)
 511{
 512        return gfar_is_vlan_on(priv) ||
 513               (priv->ndev->features & NETIF_F_RXCSUM) ||
 514               (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
 515}
 516
 517static void free_tx_pointers(struct gfar_private *priv)
 518{
 519        int i;
 520
 521        for (i = 0; i < priv->num_tx_queues; i++)
 522                kfree(priv->tx_queue[i]);
 523}
 524
 525static void free_rx_pointers(struct gfar_private *priv)
 526{
 527        int i;
 528
 529        for (i = 0; i < priv->num_rx_queues; i++)
 530                kfree(priv->rx_queue[i]);
 531}
 532
 533static void unmap_group_regs(struct gfar_private *priv)
 534{
 535        int i;
 536
 537        for (i = 0; i < MAXGROUPS; i++)
 538                if (priv->gfargrp[i].regs)
 539                        iounmap(priv->gfargrp[i].regs);
 540}
 541
 542static void disable_napi(struct gfar_private *priv)
 543{
 544        int i;
 545
 546        for (i = 0; i < priv->num_grps; i++)
 547                napi_disable(&priv->gfargrp[i].napi);
 548}
 549
 550static void enable_napi(struct gfar_private *priv)
 551{
 552        int i;
 553
 554        for (i = 0; i < priv->num_grps; i++)
 555                napi_enable(&priv->gfargrp[i].napi);
 556}
 557
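     /* Map one register group's MMIO region and parse its interrupt lines
      * and Rx/Tx queue bit maps from the given device tree node.
      */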
 558static int gfar_parse_group(struct device_node *np,
 559                            struct gfar_private *priv, const char *model)
 560{
 561        u32 *queue_mask;
 562
 563        priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
 564        if (!priv->gfargrp[priv->num_grps].regs)
 565                return -ENOMEM;
 566
 567        priv->gfargrp[priv->num_grps].interruptTransmit =
 568                        irq_of_parse_and_map(np, 0);
 569
  570        /* If we aren't the FEC, we have multiple interrupts */
 571        if (model && strcasecmp(model, "FEC")) {
 572                priv->gfargrp[priv->num_grps].interruptReceive =
 573                        irq_of_parse_and_map(np, 1);
 574                priv->gfargrp[priv->num_grps].interruptError =
  575                        irq_of_parse_and_map(np, 2);
 576                if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
 577                    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
 578                    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
 579                        return -EINVAL;
 580        }
 581
 582        priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
 583        priv->gfargrp[priv->num_grps].priv = priv;
 584        spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
 585        if (priv->mode == MQ_MG_MODE) {
 586                queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
 587                priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
 588                        *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
 589                queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
 590                priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
 591                        *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
 592        } else {
 593                priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
 594                priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
 595        }
 596        priv->num_grps++;
 597
 598        return 0;
 599}
 600
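     /* Parse the device tree node: allocate the net_device and per-queue
      * structures, map the register group(s), and record the MAC address,
      * stashing, PHY and device-flag settings in the driver private data.
      */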
 601static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 602{
 603        const char *model;
 604        const char *ctype;
 605        const void *mac_addr;
 606        int err = 0, i;
 607        struct net_device *dev = NULL;
 608        struct gfar_private *priv = NULL;
 609        struct device_node *np = ofdev->dev.of_node;
 610        struct device_node *child = NULL;
 611        const u32 *stash;
 612        const u32 *stash_len;
 613        const u32 *stash_idx;
 614        unsigned int num_tx_qs, num_rx_qs;
 615        u32 *tx_queues, *rx_queues;
 616
 617        if (!np || !of_device_is_available(np))
 618                return -ENODEV;
 619
 620        /* parse the num of tx and rx queues */
 621        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
 622        num_tx_qs = tx_queues ? *tx_queues : 1;
 623
 624        if (num_tx_qs > MAX_TX_QS) {
 625                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
 626                       num_tx_qs, MAX_TX_QS);
 627                pr_err("Cannot do alloc_etherdev, aborting\n");
 628                return -EINVAL;
 629        }
 630
 631        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
 632        num_rx_qs = rx_queues ? *rx_queues : 1;
 633
 634        if (num_rx_qs > MAX_RX_QS) {
 635                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
 636                       num_rx_qs, MAX_RX_QS);
 637                pr_err("Cannot do alloc_etherdev, aborting\n");
 638                return -EINVAL;
 639        }
 640
 641        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
 642        dev = *pdev;
  643        if (!dev)
 644                return -ENOMEM;
 645
 646        priv = netdev_priv(dev);
 647        priv->node = ofdev->dev.of_node;
 648        priv->ndev = dev;
 649
 650        priv->num_tx_queues = num_tx_qs;
 651        netif_set_real_num_rx_queues(dev, num_rx_qs);
 652        priv->num_rx_queues = num_rx_qs;
 653        priv->num_grps = 0x0;
 654
 655        /* Init Rx queue filer rule set linked list */
 656        INIT_LIST_HEAD(&priv->rx_list.list);
 657        priv->rx_list.count = 0;
 658        mutex_init(&priv->rx_queue_access);
 659
 660        model = of_get_property(np, "model", NULL);
 661
 662        for (i = 0; i < MAXGROUPS; i++)
 663                priv->gfargrp[i].regs = NULL;
 664
 665        /* Parse and initialize group specific information */
 666        if (of_device_is_compatible(np, "fsl,etsec2")) {
 667                priv->mode = MQ_MG_MODE;
 668                for_each_child_of_node(np, child) {
 669                        err = gfar_parse_group(child, priv, model);
 670                        if (err)
 671                                goto err_grp_init;
 672                }
 673        } else {
 674                priv->mode = SQ_SG_MODE;
 675                err = gfar_parse_group(np, priv, model);
 676                if (err)
 677                        goto err_grp_init;
 678        }
 679
 680        for (i = 0; i < priv->num_tx_queues; i++)
  681                priv->tx_queue[i] = NULL;
 682        for (i = 0; i < priv->num_rx_queues; i++)
 683                priv->rx_queue[i] = NULL;
 684
 685        for (i = 0; i < priv->num_tx_queues; i++) {
 686                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
 687                                            GFP_KERNEL);
 688                if (!priv->tx_queue[i]) {
 689                        err = -ENOMEM;
 690                        goto tx_alloc_failed;
 691                }
 692                priv->tx_queue[i]->tx_skbuff = NULL;
 693                priv->tx_queue[i]->qindex = i;
 694                priv->tx_queue[i]->dev = dev;
 695                spin_lock_init(&(priv->tx_queue[i]->txlock));
 696        }
 697
 698        for (i = 0; i < priv->num_rx_queues; i++) {
 699                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
 700                                            GFP_KERNEL);
 701                if (!priv->rx_queue[i]) {
 702                        err = -ENOMEM;
 703                        goto rx_alloc_failed;
 704                }
 705                priv->rx_queue[i]->rx_skbuff = NULL;
 706                priv->rx_queue[i]->qindex = i;
 707                priv->rx_queue[i]->dev = dev;
 708                spin_lock_init(&(priv->rx_queue[i]->rxlock));
 709        }
 710
 711
 712        stash = of_get_property(np, "bd-stash", NULL);
 713
 714        if (stash) {
 715                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 716                priv->bd_stash_en = 1;
 717        }
 718
 719        stash_len = of_get_property(np, "rx-stash-len", NULL);
 720
 721        if (stash_len)
 722                priv->rx_stash_size = *stash_len;
 723
 724        stash_idx = of_get_property(np, "rx-stash-idx", NULL);
 725
 726        if (stash_idx)
 727                priv->rx_stash_index = *stash_idx;
 728
 729        if (stash_len || stash_idx)
 730                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 731
 732        mac_addr = of_get_mac_address(np);
 733
 734        if (mac_addr)
 735                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 736
 737        if (model && !strcasecmp(model, "TSEC"))
 738                priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
 739                                     FSL_GIANFAR_DEV_HAS_COALESCE |
 740                                     FSL_GIANFAR_DEV_HAS_RMON |
 741                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 742
 743        if (model && !strcasecmp(model, "eTSEC"))
 744                priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
 745                                     FSL_GIANFAR_DEV_HAS_COALESCE |
 746                                     FSL_GIANFAR_DEV_HAS_RMON |
 747                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 748                                     FSL_GIANFAR_DEV_HAS_PADDING |
 749                                     FSL_GIANFAR_DEV_HAS_CSUM |
 750                                     FSL_GIANFAR_DEV_HAS_VLAN |
 751                                     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 752                                     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 753                                     FSL_GIANFAR_DEV_HAS_TIMER;
 754
 755        ctype = of_get_property(np, "phy-connection-type", NULL);
 756
 757        /* We only care about rgmii-id.  The rest are autodetected */
 758        if (ctype && !strcmp(ctype, "rgmii-id"))
 759                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 760        else
 761                priv->interface = PHY_INTERFACE_MODE_MII;
 762
 763        if (of_get_property(np, "fsl,magic-packet", NULL))
 764                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 765
 766        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 767
 768        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
 769        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 770
 771        return 0;
 772
 773rx_alloc_failed:
 774        free_rx_pointers(priv);
 775tx_alloc_failed:
 776        free_tx_pointers(priv);
 777err_grp_init:
 778        unmap_group_regs(priv);
 779        free_netdev(dev);
 780        return err;
 781}
 782
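     /* Handle SIOCSHWTSTAMP: enable or disable hardware time stamping,
      * restarting the controller when the Rx time stamping setting changes.
      */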
 783static int gfar_hwtstamp_ioctl(struct net_device *netdev,
 784                               struct ifreq *ifr, int cmd)
 785{
 786        struct hwtstamp_config config;
 787        struct gfar_private *priv = netdev_priv(netdev);
 788
 789        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 790                return -EFAULT;
 791
 792        /* reserved for future extensions */
 793        if (config.flags)
 794                return -EINVAL;
 795
 796        switch (config.tx_type) {
 797        case HWTSTAMP_TX_OFF:
 798                priv->hwts_tx_en = 0;
 799                break;
 800        case HWTSTAMP_TX_ON:
 801                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 802                        return -ERANGE;
 803                priv->hwts_tx_en = 1;
 804                break;
 805        default:
 806                return -ERANGE;
 807        }
 808
 809        switch (config.rx_filter) {
 810        case HWTSTAMP_FILTER_NONE:
 811                if (priv->hwts_rx_en) {
 812                        stop_gfar(netdev);
 813                        priv->hwts_rx_en = 0;
 814                        startup_gfar(netdev);
 815                }
 816                break;
 817        default:
 818                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 819                        return -ERANGE;
 820                if (!priv->hwts_rx_en) {
 821                        stop_gfar(netdev);
 822                        priv->hwts_rx_en = 1;
 823                        startup_gfar(netdev);
 824                }
 825                config.rx_filter = HWTSTAMP_FILTER_ALL;
 826                break;
 827        }
 828
 829        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 830                -EFAULT : 0;
 831}
 832
 833/* Ioctl MII Interface */
 834static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 835{
 836        struct gfar_private *priv = netdev_priv(dev);
 837
 838        if (!netif_running(dev))
 839                return -EINVAL;
 840
 841        if (cmd == SIOCSHWTSTAMP)
 842                return gfar_hwtstamp_ioctl(dev, rq, cmd);
 843
 844        if (!priv->phydev)
 845                return -ENODEV;
 846
 847        return phy_mii_ioctl(priv->phydev, rq, cmd);
 848}
 849
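     /* Return bit_map with its low max_qs bits mirrored, i.e. bit
      * (max_qs - 1) becomes bit 0 and vice versa.
      */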
 850static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
 851{
 852        unsigned int new_bit_map = 0x0;
 853        int mask = 0x1 << (max_qs - 1), i;
 854
 855        for (i = 0; i < max_qs; i++) {
 856                if (bit_map & mask)
 857                        new_bit_map = new_bit_map + (1 << i);
 858                mask = mask >> 0x1;
 859        }
 860        return new_bit_map;
 861}
 862
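     /* Write one cluster of filer rules matching the given protocol class,
      * mirror them in the driver's shadow table, and return the next free
      * (lower) rule index.
      */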
 863static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
 864                                   u32 class)
 865{
 866        u32 rqfpr = FPR_FILER_MASK;
 867        u32 rqfcr = 0x0;
 868
 869        rqfar--;
 870        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 871        priv->ftp_rqfpr[rqfar] = rqfpr;
 872        priv->ftp_rqfcr[rqfar] = rqfcr;
 873        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 874
 875        rqfar--;
 876        rqfcr = RQFCR_CMP_NOMATCH;
 877        priv->ftp_rqfpr[rqfar] = rqfpr;
 878        priv->ftp_rqfcr[rqfar] = rqfcr;
 879        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 880
 881        rqfar--;
 882        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
 883        rqfpr = class;
 884        priv->ftp_rqfcr[rqfar] = rqfcr;
 885        priv->ftp_rqfpr[rqfar] = rqfpr;
 886        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 887
 888        rqfar--;
 889        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
 890        rqfpr = class;
 891        priv->ftp_rqfcr[rqfar] = rqfcr;
 892        priv->ftp_rqfpr[rqfar] = rqfpr;
 893        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 894
 895        return rqfar;
 896}
 897
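     /* Populate the Rx filer table with a default match rule, one rule
      * cluster per IPv4/IPv6 TCP/UDP class, and no-match rules for all
      * remaining entries.
      */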
 898static void gfar_init_filer_table(struct gfar_private *priv)
 899{
 900        int i = 0x0;
 901        u32 rqfar = MAX_FILER_IDX;
 902        u32 rqfcr = 0x0;
 903        u32 rqfpr = FPR_FILER_MASK;
 904
 905        /* Default rule */
 906        rqfcr = RQFCR_CMP_MATCH;
 907        priv->ftp_rqfcr[rqfar] = rqfcr;
 908        priv->ftp_rqfpr[rqfar] = rqfpr;
 909        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 910
 911        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
 912        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
 913        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
 914        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
 915        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
 916        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
 917
  918        /* cur_filer_idx indicates the first non-masked rule */
 919        priv->cur_filer_idx = rqfar;
 920
 921        /* Rest are masked rules */
 922        rqfcr = RQFCR_CMP_NOMATCH;
 923        for (i = 0; i < rqfar; i++) {
 924                priv->ftp_rqfcr[i] = rqfcr;
 925                priv->ftp_rqfpr[i] = rqfpr;
 926                gfar_write_filer(priv, i, rqfcr, rqfpr);
 927        }
 928}
 929
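     /* Check the processor and silicon revisions (PVR/SVR) for parts
      * affected by known eTSEC errata and record the applicable workaround
      * flags.
      */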
 930static void gfar_detect_errata(struct gfar_private *priv)
 931{
 932        struct device *dev = &priv->ofdev->dev;
 933        unsigned int pvr = mfspr(SPRN_PVR);
 934        unsigned int svr = mfspr(SPRN_SVR);
 935        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
 936        unsigned int rev = svr & 0xffff;
 937
 938        /* MPC8313 Rev 2.0 and higher; All MPC837x */
 939        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
 940            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 941                priv->errata |= GFAR_ERRATA_74;
 942
 943        /* MPC8313 and MPC837x all rev */
 944        if ((pvr == 0x80850010 && mod == 0x80b0) ||
 945            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 946                priv->errata |= GFAR_ERRATA_76;
 947
 948        /* MPC8313 and MPC837x all rev */
 949        if ((pvr == 0x80850010 && mod == 0x80b0) ||
 950            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
 951                priv->errata |= GFAR_ERRATA_A002;
 952
 953        /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
 954        if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
 955            (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
 956                priv->errata |= GFAR_ERRATA_12;
 957
 958        if (priv->errata)
 959                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
 960                         priv->errata);
 961}
 962
 963/* Set up the ethernet device structure, private data,
 964 * and anything else we need before we start
 965 */
 966static int gfar_probe(struct platform_device *ofdev)
 967{
 968        u32 tempval;
 969        struct net_device *dev = NULL;
 970        struct gfar_private *priv = NULL;
 971        struct gfar __iomem *regs = NULL;
 972        int err = 0, i, grp_idx = 0;
 973        u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
 974        u32 isrg = 0;
 975        u32 __iomem *baddr;
 976
 977        err = gfar_of_init(ofdev, &dev);
 978
 979        if (err)
 980                return err;
 981
 982        priv = netdev_priv(dev);
 983        priv->ndev = dev;
 984        priv->ofdev = ofdev;
 985        priv->node = ofdev->dev.of_node;
 986        SET_NETDEV_DEV(dev, &ofdev->dev);
 987
 988        spin_lock_init(&priv->bflock);
 989        INIT_WORK(&priv->reset_task, gfar_reset_task);
 990
 991        dev_set_drvdata(&ofdev->dev, priv);
 992        regs = priv->gfargrp[0].regs;
 993
 994        gfar_detect_errata(priv);
 995
 996        /* Stop the DMA engine now, in case it was running before
 997         * (The firmware could have used it, and left it running).
 998         */
 999        gfar_halt(dev);
1000
1001        /* Reset MAC layer */
1002        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1003
1004        /* We need to delay at least 3 TX clocks */
1005        udelay(2);
1006
1007        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1008        gfar_write(&regs->maccfg1, tempval);
1009
1010        /* Initialize MACCFG2. */
1011        tempval = MACCFG2_INIT_SETTINGS;
1012        if (gfar_has_errata(priv, GFAR_ERRATA_74))
1013                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1014        gfar_write(&regs->maccfg2, tempval);
1015
1016        /* Initialize ECNTRL */
1017        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1018
1019        /* Set the dev->base_addr to the gfar reg region */
1020        dev->base_addr = (unsigned long) regs;
1021
1022        SET_NETDEV_DEV(dev, &ofdev->dev);
1023
1024        /* Fill in the dev structure */
1025        dev->watchdog_timeo = TX_TIMEOUT;
1026        dev->mtu = 1500;
1027        dev->netdev_ops = &gfar_netdev_ops;
1028        dev->ethtool_ops = &gfar_ethtool_ops;
1029
 1030        /* Register NAPI for each group */
1031        for (i = 0; i < priv->num_grps; i++)
1032                netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1033                               GFAR_DEV_WEIGHT);
1034
1035        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1036                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1037                                   NETIF_F_RXCSUM;
1038                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1039                                 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1040        }
1041
1042        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1043                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1044                dev->features |= NETIF_F_HW_VLAN_RX;
1045        }
1046
1047        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1048                priv->extended_hash = 1;
1049                priv->hash_width = 9;
1050
1051                priv->hash_regs[0] = &regs->igaddr0;
1052                priv->hash_regs[1] = &regs->igaddr1;
1053                priv->hash_regs[2] = &regs->igaddr2;
1054                priv->hash_regs[3] = &regs->igaddr3;
1055                priv->hash_regs[4] = &regs->igaddr4;
1056                priv->hash_regs[5] = &regs->igaddr5;
1057                priv->hash_regs[6] = &regs->igaddr6;
1058                priv->hash_regs[7] = &regs->igaddr7;
1059                priv->hash_regs[8] = &regs->gaddr0;
1060                priv->hash_regs[9] = &regs->gaddr1;
1061                priv->hash_regs[10] = &regs->gaddr2;
1062                priv->hash_regs[11] = &regs->gaddr3;
1063                priv->hash_regs[12] = &regs->gaddr4;
1064                priv->hash_regs[13] = &regs->gaddr5;
1065                priv->hash_regs[14] = &regs->gaddr6;
1066                priv->hash_regs[15] = &regs->gaddr7;
1067
1068        } else {
1069                priv->extended_hash = 0;
1070                priv->hash_width = 8;
1071
1072                priv->hash_regs[0] = &regs->gaddr0;
1073                priv->hash_regs[1] = &regs->gaddr1;
1074                priv->hash_regs[2] = &regs->gaddr2;
1075                priv->hash_regs[3] = &regs->gaddr3;
1076                priv->hash_regs[4] = &regs->gaddr4;
1077                priv->hash_regs[5] = &regs->gaddr5;
1078                priv->hash_regs[6] = &regs->gaddr6;
1079                priv->hash_regs[7] = &regs->gaddr7;
1080        }
1081
1082        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
1083                priv->padding = DEFAULT_PADDING;
1084        else
1085                priv->padding = 0;
1086
1087        if (dev->features & NETIF_F_IP_CSUM ||
1088            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1089                dev->needed_headroom = GMAC_FCB_LEN;
1090
1091        /* Program the isrg regs only if number of grps > 1 */
1092        if (priv->num_grps > 1) {
1093                baddr = &regs->isrg0;
1094                for (i = 0; i < priv->num_grps; i++) {
1095                        isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
1096                        isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1097                        gfar_write(baddr, isrg);
1098                        baddr++;
1099                        isrg = 0x0;
1100                }
1101        }
1102
 1103        /* Need to reverse the bit maps, as bit_map's MSB is q0 but
 1104         * for_each_set_bit() parses from right to left, which
 1105         * basically reverses the queue numbers
 1106         */
 1107        for (i = 0; i < priv->num_grps; i++) {
1108                priv->gfargrp[i].tx_bit_map =
1109                        reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1110                priv->gfargrp[i].rx_bit_map =
1111                        reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1112        }
1113
1114        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1115         * also assign queues to groups
1116         */
1117        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1118                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1119
1120                for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1121                                 priv->num_rx_queues) {
1122                        priv->gfargrp[grp_idx].num_rx_queues++;
1123                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1124                        rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1125                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1126                }
1127                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1128
1129                for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1130                                 priv->num_tx_queues) {
1131                        priv->gfargrp[grp_idx].num_tx_queues++;
1132                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1133                        tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1134                        tqueue = tqueue | (TQUEUE_EN0 >> i);
1135                }
1136                priv->gfargrp[grp_idx].rstat = rstat;
1137                priv->gfargrp[grp_idx].tstat = tstat;
 1138                rstat = tstat = 0;
1139        }
1140
1141        gfar_write(&regs->rqueue, rqueue);
1142        gfar_write(&regs->tqueue, tqueue);
1143
1144        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1145
1146        /* Initializing some of the rx/tx queue level parameters */
1147        for (i = 0; i < priv->num_tx_queues; i++) {
1148                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1149                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1150                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1151                priv->tx_queue[i]->txic = DEFAULT_TXIC;
1152        }
1153
1154        for (i = 0; i < priv->num_rx_queues; i++) {
1155                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1156                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1157                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1158        }
1159
1160        /* always enable rx filer */
1161        priv->rx_filer_enable = 1;
1162        /* Enable most messages by default */
 1163        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1164
1165        /* Carrier starts down, phylib will bring it up */
1166        netif_carrier_off(dev);
1167
1168        err = register_netdev(dev);
1169
1170        if (err) {
1171                pr_err("%s: Cannot register net device, aborting\n", dev->name);
1172                goto register_fail;
1173        }
1174
1175        device_init_wakeup(&dev->dev,
1176                           priv->device_flags &
1177                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1178
1179        /* fill out IRQ number and name fields */
1180        for (i = 0; i < priv->num_grps; i++) {
1181                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1182                        sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
1183                                dev->name, "_g", '0' + i, "_tx");
1184                        sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
1185                                dev->name, "_g", '0' + i, "_rx");
1186                        sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
1187                                dev->name, "_g", '0' + i, "_er");
1188                } else
1189                        strcpy(priv->gfargrp[i].int_name_tx, dev->name);
1190        }
1191
1192        /* Initialize the filer table */
1193        gfar_init_filer_table(priv);
1194
1195        /* Create all the sysfs files */
1196        gfar_init_sysfs(dev);
1197
1198        /* Print out the device info */
1199        netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1200
1201        /* Even more device info helps when determining which kernel
1202         * provided which set of benchmarks.
1203         */
1204        netdev_info(dev, "Running with NAPI enabled\n");
1205        for (i = 0; i < priv->num_rx_queues; i++)
1206                netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1207                            i, priv->rx_queue[i]->rx_ring_size);
1208        for (i = 0; i < priv->num_tx_queues; i++)
1209                netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1210                            i, priv->tx_queue[i]->tx_ring_size);
1211
1212        return 0;
1213
1214register_fail:
1215        unmap_group_regs(priv);
1216        free_tx_pointers(priv);
1217        free_rx_pointers(priv);
1218        if (priv->phy_node)
1219                of_node_put(priv->phy_node);
1220        if (priv->tbi_node)
1221                of_node_put(priv->tbi_node);
1222        free_netdev(dev);
1223        return err;
1224}
1225
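     /* Undo gfar_probe(): drop the PHY/TBI device tree references,
      * unregister the net device, unmap the register groups and free the
      * net device.
      */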
1226static int gfar_remove(struct platform_device *ofdev)
1227{
1228        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
1229
1230        if (priv->phy_node)
1231                of_node_put(priv->phy_node);
1232        if (priv->tbi_node)
1233                of_node_put(priv->tbi_node);
1234
1235        dev_set_drvdata(&ofdev->dev, NULL);
1236
1237        unregister_netdev(priv->ndev);
1238        unmap_group_regs(priv);
1239        free_netdev(priv->ndev);
1240
1241        return 0;
1242}
1243
1244#ifdef CONFIG_PM
1245
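     /* Suspend: halt DMA, disable Tx (and Rx unless wake-on-LAN via magic
      * packet is enabled), then either arm the magic-packet interrupt or
      * stop the PHY.
      */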
1246static int gfar_suspend(struct device *dev)
1247{
1248        struct gfar_private *priv = dev_get_drvdata(dev);
1249        struct net_device *ndev = priv->ndev;
1250        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1251        unsigned long flags;
1252        u32 tempval;
1253
1254        int magic_packet = priv->wol_en &&
1255                           (priv->device_flags &
1256                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1257
1258        netif_device_detach(ndev);
1259
1260        if (netif_running(ndev)) {
1261
1262                local_irq_save(flags);
1263                lock_tx_qs(priv);
1264                lock_rx_qs(priv);
1265
1266                gfar_halt_nodisable(ndev);
1267
1268                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1269                tempval = gfar_read(&regs->maccfg1);
1270
1271                tempval &= ~MACCFG1_TX_EN;
1272
1273                if (!magic_packet)
1274                        tempval &= ~MACCFG1_RX_EN;
1275
1276                gfar_write(&regs->maccfg1, tempval);
1277
1278                unlock_rx_qs(priv);
1279                unlock_tx_qs(priv);
1280                local_irq_restore(flags);
1281
1282                disable_napi(priv);
1283
1284                if (magic_packet) {
1285                        /* Enable interrupt on Magic Packet */
1286                        gfar_write(&regs->imask, IMASK_MAG);
1287
1288                        /* Enable Magic Packet mode */
1289                        tempval = gfar_read(&regs->maccfg2);
1290                        tempval |= MACCFG2_MPEN;
1291                        gfar_write(&regs->maccfg2, tempval);
1292                } else {
1293                        phy_stop(priv->phydev);
1294                }
1295        }
1296
1297        return 0;
1298}
1299
1300static int gfar_resume(struct device *dev)
1301{
1302        struct gfar_private *priv = dev_get_drvdata(dev);
1303        struct net_device *ndev = priv->ndev;
1304        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1305        unsigned long flags;
1306        u32 tempval;
1307        int magic_packet = priv->wol_en &&
1308                           (priv->device_flags &
1309                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1310
1311        if (!netif_running(ndev)) {
1312                netif_device_attach(ndev);
1313                return 0;
1314        }
1315
1316        if (!magic_packet && priv->phydev)
1317                phy_start(priv->phydev);
1318
1319        /* Disable Magic Packet mode, in case something
1320         * else woke us up.
1321         */
1322        local_irq_save(flags);
1323        lock_tx_qs(priv);
1324        lock_rx_qs(priv);
1325
1326        tempval = gfar_read(&regs->maccfg2);
1327        tempval &= ~MACCFG2_MPEN;
1328        gfar_write(&regs->maccfg2, tempval);
1329
1330        gfar_start(ndev);
1331
1332        unlock_rx_qs(priv);
1333        unlock_tx_qs(priv);
1334        local_irq_restore(flags);
1335
1336        netif_device_attach(ndev);
1337
1338        enable_napi(priv);
1339
1340        return 0;
1341}
1342
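     /* Hibernation restore: rebuild the descriptor rings, reprogram the
      * registers and MAC address, and restart the controller and PHY.
      */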
1343static int gfar_restore(struct device *dev)
1344{
1345        struct gfar_private *priv = dev_get_drvdata(dev);
1346        struct net_device *ndev = priv->ndev;
1347
1348        if (!netif_running(ndev))
1349                return 0;
1350
1351        gfar_init_bds(ndev);
1352        init_registers(ndev);
1353        gfar_set_mac_address(ndev);
1354        gfar_init_mac(ndev);
1355        gfar_start(ndev);
1356
1357        priv->oldlink = 0;
1358        priv->oldspeed = 0;
1359        priv->oldduplex = -1;
1360
1361        if (priv->phydev)
1362                phy_start(priv->phydev);
1363
1364        netif_device_attach(ndev);
1365        enable_napi(priv);
1366
1367        return 0;
1368}
1369
1370static struct dev_pm_ops gfar_pm_ops = {
1371        .suspend = gfar_suspend,
1372        .resume = gfar_resume,
1373        .freeze = gfar_suspend,
1374        .thaw = gfar_resume,
1375        .restore = gfar_restore,
1376};
1377
1378#define GFAR_PM_OPS (&gfar_pm_ops)
1379
1380#else
1381
1382#define GFAR_PM_OPS NULL
1383
1384#endif
1385
1386/* Reads the controller's registers to determine what interface
1387 * connects it to the PHY.
1388 */
1389static phy_interface_t gfar_get_interface(struct net_device *dev)
1390{
1391        struct gfar_private *priv = netdev_priv(dev);
1392        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1393        u32 ecntrl;
1394
1395        ecntrl = gfar_read(&regs->ecntrl);
1396
1397        if (ecntrl & ECNTRL_SGMII_MODE)
1398                return PHY_INTERFACE_MODE_SGMII;
1399
1400        if (ecntrl & ECNTRL_TBI_MODE) {
1401                if (ecntrl & ECNTRL_REDUCED_MODE)
1402                        return PHY_INTERFACE_MODE_RTBI;
1403                else
1404                        return PHY_INTERFACE_MODE_TBI;
1405        }
1406
1407        if (ecntrl & ECNTRL_REDUCED_MODE) {
1408                if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1409                        return PHY_INTERFACE_MODE_RMII;
1410                }
1411                else {
1412                        phy_interface_t interface = priv->interface;
1413
1414                        /* This isn't autodetected right now, so it must
1415                         * be set by the device tree or platform code.
1416                         */
1417                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1418                                return PHY_INTERFACE_MODE_RGMII_ID;
1419
1420                        return PHY_INTERFACE_MODE_RGMII;
1421                }
1422        }
1423
1424        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1425                return PHY_INTERFACE_MODE_GMII;
1426
1427        return PHY_INTERFACE_MODE_MII;
1428}
1429
1430
1431/* Initializes driver's PHY state, and attaches to the PHY.
1432 * Returns 0 on success.
1433 */
1434static int init_phy(struct net_device *dev)
1435{
1436        struct gfar_private *priv = netdev_priv(dev);
1437        uint gigabit_support =
1438                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1439                SUPPORTED_1000baseT_Full : 0;
1440        phy_interface_t interface;
1441
1442        priv->oldlink = 0;
1443        priv->oldspeed = 0;
1444        priv->oldduplex = -1;
1445
1446        interface = gfar_get_interface(dev);
1447
1448        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1449                                      interface);
1450        if (!priv->phydev)
1451                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1452                                                         interface);
1453        if (!priv->phydev) {
1454                dev_err(&dev->dev, "could not attach to PHY\n");
1455                return -ENODEV;
1456        }
1457
1458        if (interface == PHY_INTERFACE_MODE_SGMII)
1459                gfar_configure_serdes(dev);
1460
1461        /* Remove any features not supported by the controller */
1462        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1463        priv->phydev->advertising = priv->phydev->supported;
1464
1465        return 0;
1466}
1467
1468/* Initialize TBI PHY interface for communicating with the
1469 * SERDES lynx PHY on the chip.  We communicate with this PHY
1470 * through the MDIO bus on each controller, treating it as a
1471 * "normal" PHY at the address found in the TBIPA register.  We assume
1472 * that the TBIPA register is valid.  Either the MDIO bus code will set
1473 * it to a value that doesn't conflict with other PHYs on the bus, or the
1474 * value doesn't matter, as there are no other PHYs on the bus.
1475 */
1476static void gfar_configure_serdes(struct net_device *dev)
1477{
1478        struct gfar_private *priv = netdev_priv(dev);
1479        struct phy_device *tbiphy;
1480
1481        if (!priv->tbi_node) {
1482                dev_warn(&dev->dev, "error: SGMII mode requires that the "
1483                                    "device tree specify a tbi-handle\n");
1484                return;
1485        }
1486
1487        tbiphy = of_phy_find_device(priv->tbi_node);
1488        if (!tbiphy) {
1489                dev_err(&dev->dev, "error: Could not get TBI device\n");
1490                return;
1491        }
1492
1493        /* If the link is already up, we must already be ok, and don't need to
1494         * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1495         * everything for us?  Resetting it takes the link down and requires
1496         * several seconds for it to come back.
1497         */
1498        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1499                return;
1500
1501        /* Single clock mode, MII mode off (for SerDes communication) */
1502        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1503
1504        phy_write(tbiphy, MII_ADVERTISE,
1505                  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1506                  ADVERTISE_1000XPSE_ASYM);
1507
1508        phy_write(tbiphy, MII_BMCR,
1509                  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1510                  BMCR_SPEED1000);
1511}
1512
1513static void init_registers(struct net_device *dev)
1514{
1515        struct gfar_private *priv = netdev_priv(dev);
1516        struct gfar __iomem *regs = NULL;
1517        int i;
1518
1519        for (i = 0; i < priv->num_grps; i++) {
1520                regs = priv->gfargrp[i].regs;
1521                /* Clear IEVENT */
1522                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1523
1524                /* Initialize IMASK */
1525                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1526        }
1527
1528        regs = priv->gfargrp[0].regs;
1529        /* Init hash registers to zero */
1530        gfar_write(&regs->igaddr0, 0);
1531        gfar_write(&regs->igaddr1, 0);
1532        gfar_write(&regs->igaddr2, 0);
1533        gfar_write(&regs->igaddr3, 0);
1534        gfar_write(&regs->igaddr4, 0);
1535        gfar_write(&regs->igaddr5, 0);
1536        gfar_write(&regs->igaddr6, 0);
1537        gfar_write(&regs->igaddr7, 0);
1538
1539        gfar_write(&regs->gaddr0, 0);
1540        gfar_write(&regs->gaddr1, 0);
1541        gfar_write(&regs->gaddr2, 0);
1542        gfar_write(&regs->gaddr3, 0);
1543        gfar_write(&regs->gaddr4, 0);
1544        gfar_write(&regs->gaddr5, 0);
1545        gfar_write(&regs->gaddr6, 0);
1546        gfar_write(&regs->gaddr7, 0);
1547
1548        /* Zero out the RMON MIB registers if the device has them */
1549        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1550                memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1551
1552                /* Mask off the CAM interrupts */
1553                gfar_write(&regs->rmon.cam1, 0xffffffff);
1554                gfar_write(&regs->rmon.cam2, 0xffffffff);
1555        }
1556
1557        /* Initialize the max receive buffer length */
1558        gfar_write(&regs->mrblr, priv->rx_buffer_size);
1559
1560        /* Initialize the Minimum Frame Length Register */
1561        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1562}
1563
1564static int __gfar_is_rx_idle(struct gfar_private *priv)
1565{
1566        u32 res;
1567
1568        /* Normally the TSEC should not hang on GRS commands, so we should
1569         * simply wait for the IEVENT_GRSC flag.
1570         */
1571        if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1572                return 0;
1573
1574        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1575         * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1576         * and the Rx can be safely reset.
1577         */
1578        res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1579        res &= 0x7f807f80;
1580        if ((res & 0xffff) == (res >> 16))
1581                return 1;
1582
1583        return 0;
1584}
1585
1586/* Halt the receive and transmit queues */
1587static void gfar_halt_nodisable(struct net_device *dev)
1588{
1589        struct gfar_private *priv = netdev_priv(dev);
1590        struct gfar __iomem *regs = NULL;
1591        u32 tempval;
1592        int i;
1593
1594        for (i = 0; i < priv->num_grps; i++) {
1595                regs = priv->gfargrp[i].regs;
1596                /* Mask all interrupts */
1597                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1598
1599                /* Clear all interrupts */
1600                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1601        }
1602
1603        regs = priv->gfargrp[0].regs;
1604        /* Stop the DMA, and wait for it to stop */
1605        tempval = gfar_read(&regs->dmactrl);
1606        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1607            (DMACTRL_GRS | DMACTRL_GTS)) {
1608                int ret;
1609
1610                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1611                gfar_write(&regs->dmactrl, tempval);
1612
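                /* Wait for the graceful rx and tx stops to complete.  On
                 * silicon affected by errata A002 the GRSC event may never
                 * be raised, so also poll the rx idle state directly.
                 */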
1613                do {
1614                        ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1615                                 (IEVENT_GRSC | IEVENT_GTSC)) ==
1616                                 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1617                        if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1618                                ret = __gfar_is_rx_idle(priv);
1619                } while (!ret);
1620        }
1621}
1622
1623/* Halt the receive and transmit queues */
1624void gfar_halt(struct net_device *dev)
1625{
1626        struct gfar_private *priv = netdev_priv(dev);
1627        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1628        u32 tempval;
1629
1630        gfar_halt_nodisable(dev);
1631
1632        /* Disable Rx and Tx */
1633        tempval = gfar_read(&regs->maccfg1);
1634        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1635        gfar_write(&regs->maccfg1, tempval);
1636}
1637
1638static void free_grp_irqs(struct gfar_priv_grp *grp)
1639{
1640        free_irq(grp->interruptError, grp);
1641        free_irq(grp->interruptTransmit, grp);
1642        free_irq(grp->interruptReceive, grp);
1643}
1644
1645void stop_gfar(struct net_device *dev)
1646{
1647        struct gfar_private *priv = netdev_priv(dev);
1648        unsigned long flags;
1649        int i;
1650
1651        phy_stop(priv->phydev);
1652
1654        /* Lock it down */
1655        local_irq_save(flags);
1656        lock_tx_qs(priv);
1657        lock_rx_qs(priv);
1658
1659        gfar_halt(dev);
1660
1661        unlock_rx_qs(priv);
1662        unlock_tx_qs(priv);
1663        local_irq_restore(flags);
1664
1665        /* Free the IRQs */
1666        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1667                for (i = 0; i < priv->num_grps; i++)
1668                        free_grp_irqs(&priv->gfargrp[i]);
1669        } else {
1670                for (i = 0; i < priv->num_grps; i++)
1671                        free_irq(priv->gfargrp[i].interruptTransmit,
1672                                 &priv->gfargrp[i]);
1673        }
1674
1675        free_skb_resources(priv);
1676}
1677
1678static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1679{
1680        struct txbd8 *txbdp;
1681        struct gfar_private *priv = netdev_priv(tx_queue->dev);
1682        int i, j;
1683
1684        txbdp = tx_queue->tx_bd_base;
1685
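        /* For each queued skb, unmap the head buffer and every fragment
         * buffer from DMA, then free the skb itself.
         */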
1686        for (i = 0; i < tx_queue->tx_ring_size; i++) {
1687                if (!tx_queue->tx_skbuff[i])
1688                        continue;
1689
1690                dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1691                                 txbdp->length, DMA_TO_DEVICE);
1692                txbdp->lstatus = 0;
1693                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1694                     j++) {
1695                        txbdp++;
1696                        dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1697                                       txbdp->length, DMA_TO_DEVICE);
1698                }
1699                txbdp++;
1700                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1701                tx_queue->tx_skbuff[i] = NULL;
1702        }
1703        kfree(tx_queue->tx_skbuff);
1704}
1705
1706static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1707{
1708        struct rxbd8 *rxbdp;
1709        struct gfar_private *priv = netdev_priv(rx_queue->dev);
1710        int i;
1711
1712        rxbdp = rx_queue->rx_bd_base;
1713
1714        for (i = 0; i < rx_queue->rx_ring_size; i++) {
1715                if (rx_queue->rx_skbuff[i]) {
1716                        dma_unmap_single(&priv->ofdev->dev,
1717                                         rxbdp->bufPtr, priv->rx_buffer_size,
1718                                         DMA_FROM_DEVICE);
1719                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1720                        rx_queue->rx_skbuff[i] = NULL;
1721                }
1722                rxbdp->lstatus = 0;
1723                rxbdp->bufPtr = 0;
1724                rxbdp++;
1725        }
1726        kfree(rx_queue->rx_skbuff);
1727}
1728
1729/* If there are any tx skbs or rx skbs still around, free them.
1730 * Then free tx_skbuff and rx_skbuff
1731 */
1732static void free_skb_resources(struct gfar_private *priv)
1733{
1734        struct gfar_priv_tx_q *tx_queue = NULL;
1735        struct gfar_priv_rx_q *rx_queue = NULL;
1736        int i;
1737
1738        /* Go through all the buffer descriptors and free their data buffers */
1739        for (i = 0; i < priv->num_tx_queues; i++) {
1740                struct netdev_queue *txq;
1741
1742                tx_queue = priv->tx_queue[i];
1743                txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1744                if (tx_queue->tx_skbuff)
1745                        free_skb_tx_queue(tx_queue);
1746                netdev_tx_reset_queue(txq);
1747        }
1748
1749        for (i = 0; i < priv->num_rx_queues; i++) {
1750                rx_queue = priv->rx_queue[i];
1751                if (rx_queue->rx_skbuff)
1752                        free_skb_rx_queue(rx_queue);
1753        }
1754
1755        dma_free_coherent(&priv->ofdev->dev,
1756                          sizeof(struct txbd8) * priv->total_tx_ring_size +
1757                          sizeof(struct rxbd8) * priv->total_rx_ring_size,
1758                          priv->tx_queue[0]->tx_bd_base,
1759                          priv->tx_queue[0]->tx_bd_dma_base);
1760        skb_queue_purge(&priv->rx_recycle);
1761}
1762
1763void gfar_start(struct net_device *dev)
1764{
1765        struct gfar_private *priv = netdev_priv(dev);
1766        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1767        u32 tempval;
1768        int i = 0;
1769
1770        /* Enable Rx and Tx in MACCFG1 */
1771        tempval = gfar_read(&regs->maccfg1);
1772        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1773        gfar_write(&regs->maccfg1, tempval);
1774
1775        /* Initialize DMACTRL to have WWR and WOP */
1776        tempval = gfar_read(&regs->dmactrl);
1777        tempval |= DMACTRL_INIT_SETTINGS;
1778        gfar_write(&regs->dmactrl, tempval);
1779
1780        /* Make sure we aren't stopped */
1781        tempval = gfar_read(&regs->dmactrl);
1782        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1783        gfar_write(&regs->dmactrl, tempval);
1784
1785        for (i = 0; i < priv->num_grps; i++) {
1786                regs = priv->gfargrp[i].regs;
1787                /* Clear THLT/RHLT, so that the DMA starts polling now */
1788                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1789                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1790                /* Unmask the interrupts we look for */
1791                gfar_write(&regs->imask, IMASK_DEFAULT);
1792        }
1793
1794        dev->trans_start = jiffies; /* prevent tx timeout */
1795}
1796
1797void gfar_configure_coalescing(struct gfar_private *priv,
1798                               unsigned long tx_mask, unsigned long rx_mask)
1799{
1800        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1801        u32 __iomem *baddr;
1802        int i = 0;
1803
1804        /* Backward compatible case -- even if we enable
1805         * multiple queues, there's only a single register to program
1806         */
1807        gfar_write(&regs->txic, 0);
1808        if (likely(priv->tx_queue[0]->txcoalescing))
1809                gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1810
1811        gfar_write(&regs->rxic, 0);
1812        if (unlikely(priv->rx_queue[0]->rxcoalescing))
1813                gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1814
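        /* In multi-group mode each queue has its own coalescing register
         * (txic0/rxic0 onwards), so program only the queues selected by
         * the caller's tx_mask and rx_mask.
         */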
1815        if (priv->mode == MQ_MG_MODE) {
1816                baddr = &regs->txic0;
1817                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1818                        gfar_write(baddr + i, 0);
1819                        if (likely(priv->tx_queue[i]->txcoalescing))
1820                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
1821                }
1822
1823                baddr = &regs->rxic0;
1824                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1825                        gfar_write(baddr + i, 0);
1826                        if (likely(priv->rx_queue[i]->rxcoalescing))
1827                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1828                }
1829        }
1830}
1831
1832static int register_grp_irqs(struct gfar_priv_grp *grp)
1833{
1834        struct gfar_private *priv = grp->priv;
1835        struct net_device *dev = priv->ndev;
1836        int err;
1837
1838        /* If the device has multiple interrupts, register for
1839         * them.  Otherwise, register for the one interrupt.
1840         */
1841        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1842                /* Install our interrupt handlers for Error,
1843                 * Transmit, and Receive
1844                 */
1845                if ((err = request_irq(grp->interruptError, gfar_error,
1846                                       0, grp->int_name_er, grp)) < 0) {
1847                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1848                                  grp->interruptError);
1849
1850                        goto err_irq_fail;
1851                }
1852
1853                if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1854                                       0, grp->int_name_tx, grp)) < 0) {
1855                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1856                                  grp->interruptTransmit);
1857                        goto tx_irq_fail;
1858                }
1859
1860                if ((err = request_irq(grp->interruptReceive, gfar_receive,
1861                                       0, grp->int_name_rx, grp)) < 0) {
1862                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1863                                  grp->interruptReceive);
1864                        goto rx_irq_fail;
1865                }
1866        } else {
1867                if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1868                                       0, grp->int_name_tx, grp)) < 0) {
1869                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1870                                  grp->interruptTransmit);
1871                        goto err_irq_fail;
1872                }
1873        }
1874
1875        return 0;
1876
1877rx_irq_fail:
1878        free_irq(grp->interruptTransmit, grp);
1879tx_irq_fail:
1880        free_irq(grp->interruptError, grp);
1881err_irq_fail:
1882        return err;
1883
1884}
1885
1886/* Bring the controller up and running */
1887int startup_gfar(struct net_device *ndev)
1888{
1889        struct gfar_private *priv = netdev_priv(ndev);
1890        struct gfar __iomem *regs = NULL;
1891        int err, i, j;
1892
1893        for (i = 0; i < priv->num_grps; i++) {
1894                regs = priv->gfargrp[i].regs;
1895                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1896        }
1897
1898        regs = priv->gfargrp[0].regs;
1899        err = gfar_alloc_skb_resources(ndev);
1900        if (err)
1901                return err;
1902
1903        gfar_init_mac(ndev);
1904
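        /* Register the interrupts for each group; if any group fails,
         * free the IRQs already requested before bailing out.
         */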
1905        for (i = 0; i < priv->num_grps; i++) {
1906                err = register_grp_irqs(&priv->gfargrp[i]);
1907                if (err) {
1908                        for (j = 0; j < i; j++)
1909                                free_grp_irqs(&priv->gfargrp[j]);
1910                        goto irq_fail;
1911                }
1912        }
1913
1914        /* Start the controller */
1915        gfar_start(ndev);
1916
1917        phy_start(priv->phydev);
1918
1919        gfar_configure_coalescing(priv, 0xFF, 0xFF);
1920
1921        return 0;
1922
1923irq_fail:
1924        free_skb_resources(priv);
1925        return err;
1926}
1927
1928/* Called when something needs to use the ethernet device
1929 * Returns 0 for success.
1930 */
1931static int gfar_enet_open(struct net_device *dev)
1932{
1933        struct gfar_private *priv = netdev_priv(dev);
1934        int err;
1935
1936        enable_napi(priv);
1937
1938        skb_queue_head_init(&priv->rx_recycle);
1939
1940        /* Initialize a bunch of registers */
1941        init_registers(dev);
1942
1943        gfar_set_mac_address(dev);
1944
1945        err = init_phy(dev);
1946
1947        if (err) {
1948                disable_napi(priv);
1949                return err;
1950        }
1951
1952        err = startup_gfar(dev);
1953        if (err) {
1954                disable_napi(priv);
1955                return err;
1956        }
1957
1958        netif_tx_start_all_queues(dev);
1959
1960        device_set_wakeup_enable(&dev->dev, priv->wol_en);
1961
1962        return err;
1963}
1964
1965static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1966{
1967        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1968
1969        memset(fcb, 0, GMAC_FCB_LEN);
1970
1971        return fcb;
1972}
1973
1974static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1975                                    int fcb_length)
1976{
1977        /* If we're here, it's an IP packet with a TCP or UDP
1978         * payload.  We set it up for checksumming, using a pseudo-header
1979         * we provide.
1980         */
1981        u8 flags = TXFCB_DEFAULT;
1982
1983        /* Tell the controller what the protocol is,
1984         * and provide the already-calculated phcs.
1985         */
1986        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1987                flags |= TXFCB_UDP;
1988                fcb->phcs = udp_hdr(skb)->check;
1989        } else
1990                fcb->phcs = tcp_hdr(skb)->check;
1991
1992        /* l3os is the distance between the start of the
1993         * frame (skb->data) and the start of the IP hdr.
1994         * l4os is the distance between the start of the
1995         * l3 hdr and the l4 hdr
1996         */
1997        fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
1998        fcb->l4os = skb_network_header_len(skb);
1999
2000        fcb->flags = flags;
2001}
2002
2003static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2004{
2005        fcb->flags |= TXFCB_VLN;
2006        fcb->vlctl = vlan_tx_tag_get(skb);
2007}
2008
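/* Advance a TxBD pointer by 'stride' entries, wrapping back to the start
 * of the ring once the end (base + ring_size) is passed.
 */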
2009static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2010                                      struct txbd8 *base, int ring_size)
2011{
2012        struct txbd8 *new_bd = bdp + stride;
2013
2014        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2015}
2016
2017static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2018                                      int ring_size)
2019{
2020        return skip_txbd(bdp, 1, base, ring_size);
2021}
2022
2023/* This is called by the kernel when a frame is ready for transmission.
2024 * It is pointed to by the dev->hard_start_xmit function pointer
2025 */
2026static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2027{
2028        struct gfar_private *priv = netdev_priv(dev);
2029        struct gfar_priv_tx_q *tx_queue = NULL;
2030        struct netdev_queue *txq;
2031        struct gfar __iomem *regs = NULL;
2032        struct txfcb *fcb = NULL;
2033        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2034        u32 lstatus;
2035        int i, rq = 0, do_tstamp = 0;
2036        u32 bufaddr;
2037        unsigned long flags;
2038        unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2039
2040        /* TOE=1 frames larger than 2500 bytes may see excess delays
2041         * before start of transmission.
2042         */
2043        if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2044                     skb->ip_summed == CHECKSUM_PARTIAL &&
2045                     skb->len > 2500)) {
2046                int ret;
2047
2048                ret = skb_checksum_help(skb);
2049                if (ret)
2050                        return ret;
2051        }
2052
2053        rq = skb->queue_mapping;
2054        tx_queue = priv->tx_queue[rq];
2055        txq = netdev_get_tx_queue(dev, rq);
2056        base = tx_queue->tx_bd_base;
2057        regs = tx_queue->grp->regs;
2058
2059        /* check if time stamp should be generated */
2060        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2061                     priv->hwts_tx_en)) {
2062                do_tstamp = 1;
2063                fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2064        }
2065
2066        /* make space for additional header when fcb is needed */
2067        if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2068             vlan_tx_tag_present(skb) ||
2069             unlikely(do_tstamp)) &&
2070            (skb_headroom(skb) < fcb_length)) {
2071                struct sk_buff *skb_new;
2072
2073                skb_new = skb_realloc_headroom(skb, fcb_length);
2074                if (!skb_new) {
2075                        dev->stats.tx_errors++;
2076                        kfree_skb(skb);
2077                        return NETDEV_TX_OK;
2078                }
2079
2080                if (skb->sk)
2081                        skb_set_owner_w(skb_new, skb->sk);
2082                consume_skb(skb);
2083                skb = skb_new;
2084        }
2085
2086        /* total number of fragments in the SKB */
2087        nr_frags = skb_shinfo(skb)->nr_frags;
2088
2089        /* calculate the required number of TxBDs for this skb */
2090        if (unlikely(do_tstamp))
2091                nr_txbds = nr_frags + 2;
2092        else
2093                nr_txbds = nr_frags + 1;
2094
2095        /* check if there is space to queue this packet */
2096        if (nr_txbds > tx_queue->num_txbdfree) {
2097                /* no space, stop the queue */
2098                netif_tx_stop_queue(txq);
2099                dev->stats.tx_fifo_errors++;
2100                return NETDEV_TX_BUSY;
2101        }
2102
2103        /* Update transmit stats */
2104        tx_queue->stats.tx_bytes += skb->len;
2105        tx_queue->stats.tx_packets++;
2106
2107        txbdp = txbdp_start = tx_queue->cur_tx;
2108        lstatus = txbdp->lstatus;
2109
2110        /* Time stamp insertion requires one additional TxBD */
2111        if (unlikely(do_tstamp))
2112                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2113                                                 tx_queue->tx_ring_size);
2114
2115        if (nr_frags == 0) {
2116                if (unlikely(do_tstamp))
2117                        txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2118                                                          TXBD_INTERRUPT);
2119                else
2120                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2121        } else {
2122                /* Place the fragment addresses and lengths into the TxBDs */
2123                for (i = 0; i < nr_frags; i++) {
2124                        /* Point at the next BD, wrapping as needed */
2125                        txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2126
2127                        length = skb_shinfo(skb)->frags[i].size;
2128
2129                        lstatus = txbdp->lstatus | length |
2130                                  BD_LFLAG(TXBD_READY);
2131
2132                        /* Handle the last BD specially */
2133                        if (i == nr_frags - 1)
2134                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2135
2136                        bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2137                                                   &skb_shinfo(skb)->frags[i],
2138                                                   0,
2139                                                   length,
2140                                                   DMA_TO_DEVICE);
2141
2142                        /* set the TxBD length and buffer pointer */
2143                        txbdp->bufPtr = bufaddr;
2144                        txbdp->lstatus = lstatus;
2145                }
2146
2147                lstatus = txbdp_start->lstatus;
2148        }
2149
2150        /* Add TxPAL between FCB and frame if required */
2151        if (unlikely(do_tstamp)) {
2152                skb_push(skb, GMAC_TXPAL_LEN);
2153                memset(skb->data, 0, GMAC_TXPAL_LEN);
2154        }
2155
2156        /* Set up checksumming */
2157        if (CHECKSUM_PARTIAL == skb->ip_summed) {
2158                fcb = gfar_add_fcb(skb);
2159                /* Per the erratum, skip hw checksumming if the FCB is badly aligned */
2160                if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2161                             ((unsigned long)fcb % 0x20) > 0x18)) {
2162                        __skb_pull(skb, GMAC_FCB_LEN);
2163                        skb_checksum_help(skb);
2164                } else {
2165                        lstatus |= BD_LFLAG(TXBD_TOE);
2166                        gfar_tx_checksum(skb, fcb, fcb_length);
2167                }
2168        }
2169
2170        if (vlan_tx_tag_present(skb)) {
2171                if (unlikely(NULL == fcb)) {
2172                        fcb = gfar_add_fcb(skb);
2173                        lstatus |= BD_LFLAG(TXBD_TOE);
2174                }
2175
2176                gfar_tx_vlan(skb, fcb);
2177        }
2178
2179        /* Setup tx hardware time stamping if requested */
2180        if (unlikely(do_tstamp)) {
2181                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2182                if (fcb == NULL)
2183                        fcb = gfar_add_fcb(skb);
2184                fcb->ptp = 1;
2185                lstatus |= BD_LFLAG(TXBD_TOE);
2186        }
2187
2188        txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2189                                             skb_headlen(skb), DMA_TO_DEVICE);
2190
2191        /* If time stamping is requested, one additional TxBD must be set up. The
2192         * first TxBD points to the FCB and must have a data length of
2193         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2194         * the full frame length.
2195         */
2196        if (unlikely(do_tstamp)) {
2197                txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2198                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2199                                         (skb_headlen(skb) - fcb_length);
2200                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2201        } else {
2202                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2203        }
2204
2205        netdev_tx_sent_queue(txq, skb->len);
2206
2207        /* We can work in parallel with gfar_clean_tx_ring(), except
2208         * when modifying num_txbdfree. Note that we didn't grab the lock
2209         * when we were reading the num_txbdfree and checking for available
2210         * space, that's because outside of this function it can only grow,
2211         * and once we've got needed space, it cannot suddenly disappear.
2212         *
2213         * The lock also protects us from gfar_error(), which can modify
2214         * regs->tstat and thus retrigger the transfers, which is why we
2215         * also must grab the lock before setting ready bit for the first
2216         * to be transmitted BD.
2217         */
2218        spin_lock_irqsave(&tx_queue->txlock, flags);
2219
2220        /* The powerpc-specific eieio() is used, as wmb() has too strong
2221         * semantics (it requires synchronization between cacheable and
2222         * uncacheable mappings, which eieio doesn't provide and which we
2223         * don't need), thus requiring a more expensive sync instruction.  At
2224         * some point, the set of architecture-independent barrier functions
2225         * should be expanded to include weaker barriers.
2226         */
2227        eieio();
2228
2229        txbdp_start->lstatus = lstatus;
2230
2231        eieio(); /* force lstatus write before tx_skbuff */
2232
2233        tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2234
2235        /* Update the current skb pointer to the next entry we will use
2236         * (wrapping if necessary)
2237         */
2238        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2239                              TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2240
2241        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2242
2243        /* reduce TxBD free count */
2244        tx_queue->num_txbdfree -= (nr_txbds);
2245
2246        /* If the next BD still needs to be cleaned up, then the bds
2247         * are full.  We need to tell the kernel to stop sending us stuff.
2248         */
2249        if (!tx_queue->num_txbdfree) {
2250                netif_tx_stop_queue(txq);
2251
2252                dev->stats.tx_fifo_errors++;
2253        }
2254
2255        /* Tell the DMA to go go go */
2256        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2257
2258        /* Unlock priv */
2259        spin_unlock_irqrestore(&tx_queue->txlock, flags);
2260
2261        return NETDEV_TX_OK;
2262}
2263
2264/* Stops the kernel queue, and halts the controller */
2265static int gfar_close(struct net_device *dev)
2266{
2267        struct gfar_private *priv = netdev_priv(dev);
2268
2269        disable_napi(priv);
2270
2271        cancel_work_sync(&priv->reset_task);
2272        stop_gfar(dev);
2273
2274        /* Disconnect from the PHY */
2275        phy_disconnect(priv->phydev);
2276        priv->phydev = NULL;
2277
2278        netif_tx_stop_all_queues(dev);
2279
2280        return 0;
2281}
2282
2283/* Changes the mac address if the controller is not running. */
2284static int gfar_set_mac_address(struct net_device *dev)
2285{
2286        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2287
2288        return 0;
2289}
2290
2291/* Check if rx parser should be activated */
2292void gfar_check_rx_parser_mode(struct gfar_private *priv)
2293{
2294        struct gfar __iomem *regs;
2295        u32 tempval;
2296
2297        regs = priv->gfargrp[0].regs;
2298
2299        tempval = gfar_read(&regs->rctrl);
2300        /* If parse is no longer required, then disable parser */
2301        if (tempval & RCTRL_REQ_PARSER)
2302                tempval |= RCTRL_PRSDEP_INIT;
2303        else
2304                tempval &= ~RCTRL_PRSDEP_INIT;
2305        gfar_write(&regs->rctrl, tempval);
2306}
2307
2308/* Enables and disables VLAN insertion/extraction */
2309void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2310{
2311        struct gfar_private *priv = netdev_priv(dev);
2312        struct gfar __iomem *regs = NULL;
2313        unsigned long flags;
2314        u32 tempval;
2315
2316        regs = priv->gfargrp[0].regs;
2317        local_irq_save(flags);
2318        lock_rx_qs(priv);
2319
2320        if (features & NETIF_F_HW_VLAN_TX) {
2321                /* Enable VLAN tag insertion */
2322                tempval = gfar_read(&regs->tctrl);
2323                tempval |= TCTRL_VLINS;
2324                gfar_write(&regs->tctrl, tempval);
2325        } else {
2326                /* Disable VLAN tag insertion */
2327                tempval = gfar_read(&regs->tctrl);
2328                tempval &= ~TCTRL_VLINS;
2329                gfar_write(&regs->tctrl, tempval);
2330        }
2331
2332        if (features & NETIF_F_HW_VLAN_RX) {
2333                /* Enable VLAN tag extraction */
2334                tempval = gfar_read(&regs->rctrl);
2335                tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2336                gfar_write(&regs->rctrl, tempval);
2337        } else {
2338                /* Disable VLAN tag extraction */
2339                tempval = gfar_read(&regs->rctrl);
2340                tempval &= ~RCTRL_VLEX;
2341                gfar_write(&regs->rctrl, tempval);
2342
2343                gfar_check_rx_parser_mode(priv);
2344        }
2345
2346        gfar_change_mtu(dev, dev->mtu);
2347
2348        unlock_rx_qs(priv);
2349        local_irq_restore(flags);
2350}
2351
2352static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2353{
2354        int tempsize, tempval;
2355        struct gfar_private *priv = netdev_priv(dev);
2356        struct gfar __iomem *regs = priv->gfargrp[0].regs;
2357        int oldsize = priv->rx_buffer_size;
2358        int frame_size = new_mtu + ETH_HLEN;
2359
2360        if (gfar_is_vlan_on(priv))
2361                frame_size += VLAN_HLEN;
2362
2363        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2364                netif_err(priv, drv, dev, "Invalid MTU setting\n");
2365                return -EINVAL;
2366        }
2367
2368        if (gfar_uses_fcb(priv))
2369                frame_size += GMAC_FCB_LEN;
2370
2371        frame_size += priv->padding;
2372
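        /* Round the frame size down to a multiple of INCREMENTAL_BUFFER_SIZE
         * and add one full increment, so the rx buffer is always at least as
         * large as the frame and sized in whole increments.
         */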
2373        tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2374                   INCREMENTAL_BUFFER_SIZE;
2375
2376        /* Only stop and start the controller if it isn't already
2377         * stopped, and we changed something
2378         */
2379        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2380                stop_gfar(dev);
2381
2382        priv->rx_buffer_size = tempsize;
2383
2384        dev->mtu = new_mtu;
2385
2386        gfar_write(&regs->mrblr, priv->rx_buffer_size);
2387        gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2388
2389        /* If the mtu is larger than the max size for standard
2390         * ethernet frames (ie, a jumbo frame), then set maccfg2
2391         * to allow huge frames, and to check the length
2392         */
2393        tempval = gfar_read(&regs->maccfg2);
2394
2395        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2396            gfar_has_errata(priv, GFAR_ERRATA_74))
2397                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2398        else
2399                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2400
2401        gfar_write(&regs->maccfg2, tempval);
2402
2403        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2404                startup_gfar(dev);
2405
2406        return 0;
2407}
2408
2409/* gfar_reset_task gets scheduled when a packet has not been
2410 * transmitted after a set amount of time.
2411 * For now, assume that clearing out all the structures, and
2412 * starting over will fix the problem.
2413 */
2414static void gfar_reset_task(struct work_struct *work)
2415{
2416        struct gfar_private *priv = container_of(work, struct gfar_private,
2417                                                 reset_task);
2418        struct net_device *dev = priv->ndev;
2419
2420        if (dev->flags & IFF_UP) {
2421                netif_tx_stop_all_queues(dev);
2422                stop_gfar(dev);
2423                startup_gfar(dev);
2424                netif_tx_start_all_queues(dev);
2425        }
2426
2427        netif_tx_schedule_all(dev);
2428}
2429
2430static void gfar_timeout(struct net_device *dev)
2431{
2432        struct gfar_private *priv = netdev_priv(dev);
2433
2434        dev->stats.tx_errors++;
2435        schedule_work(&priv->reset_task);
2436}
2437
2438static void gfar_align_skb(struct sk_buff *skb)
2439{
2440        /* The data buffer must be properly aligned, so reserve as many
2441         * bytes as needed to align skb->data to an RXBUF_ALIGNMENT boundary.
2442         */
2443        skb_reserve(skb, RXBUF_ALIGNMENT -
2444                    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2445}
2446
2447/* Interrupt Handler for Transmit complete */
2448static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2449{
2450        struct net_device *dev = tx_queue->dev;
2451        struct netdev_queue *txq;
2452        struct gfar_private *priv = netdev_priv(dev);
2453        struct gfar_priv_rx_q *rx_queue = NULL;
2454        struct txbd8 *bdp, *next = NULL;
2455        struct txbd8 *lbdp = NULL;
2456        struct txbd8 *base = tx_queue->tx_bd_base;
2457        struct sk_buff *skb;
2458        int skb_dirtytx;
2459        int tx_ring_size = tx_queue->tx_ring_size;
2460        int frags = 0, nr_txbds = 0;
2461        int i;
2462        int howmany = 0;
2463        int tqi = tx_queue->qindex;
2464        unsigned int bytes_sent = 0;
2465        u32 lstatus;
2466        size_t buflen;
2467
2468        rx_queue = priv->rx_queue[tqi];
2469        txq = netdev_get_tx_queue(dev, tqi);
2470        bdp = tx_queue->dirty_tx;
2471        skb_dirtytx = tx_queue->skb_dirtytx;
2472
2473        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2474                unsigned long flags;
2475
2476                frags = skb_shinfo(skb)->nr_frags;
2477
2478                /* When time stamping, one additional TxBD must be freed.
2479                 * Also, we need to dma_unmap_single() the TxPAL.
2480                 */
2481                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2482                        nr_txbds = frags + 2;
2483                else
2484                        nr_txbds = frags + 1;
2485
2486                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2487
2488                lstatus = lbdp->lstatus;
2489
2490                /* Only clean completed frames */
2491                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2492                    (lstatus & BD_LENGTH_MASK))
2493                        break;
2494
2495                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2496                        next = next_txbd(bdp, base, tx_ring_size);
2497                        buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2498                } else
2499                        buflen = bdp->length;
2500
2501                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2502                                 buflen, DMA_TO_DEVICE);
2503
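                /* If this frame was hardware time stamped, read the
                 * timestamp back from the Tx buffer, pass it up via
                 * skb_tstamp_tx(), and release the extra TxBD.
                 */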
2504                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2505                        struct skb_shared_hwtstamps shhwtstamps;
2506                        u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2507
2508                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2509                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2510                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2511                        skb_tstamp_tx(skb, &shhwtstamps);
2512                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2513                        bdp = next;
2514                }
2515
2516                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2517                bdp = next_txbd(bdp, base, tx_ring_size);
2518
2519                for (i = 0; i < frags; i++) {
2520                        dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2521                                       bdp->length, DMA_TO_DEVICE);
2522                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2523                        bdp = next_txbd(bdp, base, tx_ring_size);
2524                }
2525
2526                bytes_sent += skb->len;
2527
2528                /* Add this skb back into the recycle pool if there's room
2529                 * (limited to rx_ring_size entries) and it's the right size
2530                 */
2531                if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2532                    skb_recycle_check(skb, priv->rx_buffer_size +
2533                                      RXBUF_ALIGNMENT)) {
2534                        gfar_align_skb(skb);
2535                        skb_queue_head(&priv->rx_recycle, skb);
2536                } else
2537                        dev_kfree_skb_any(skb);
2538
2539                tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2540
2541                skb_dirtytx = (skb_dirtytx + 1) &
2542                              TX_RING_MOD_MASK(tx_ring_size);
2543
2544                howmany++;
2545                spin_lock_irqsave(&tx_queue->txlock, flags);
2546                tx_queue->num_txbdfree += nr_txbds;
2547                spin_unlock_irqrestore(&tx_queue->txlock, flags);
2548        }
2549
2550        /* If we freed a buffer, we can restart transmission, if necessary */
2551        if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
2552                netif_wake_subqueue(dev, tqi);
2553
2554        /* Update dirty indicators */
2555        tx_queue->skb_dirtytx = skb_dirtytx;
2556        tx_queue->dirty_tx = bdp;
2557
2558        netdev_tx_completed_queue(txq, howmany, bytes_sent);
2559
2560        return howmany;
2561}
2562
2563static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2564{
2565        unsigned long flags;
2566
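        /* Mask the group's rx/tx interrupts while a NAPI poll is being
         * scheduled; if polling is already in progress, just acknowledge
         * the events so this interrupt isn't raised again.
         */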
2567        spin_lock_irqsave(&gfargrp->grplock, flags);
2568        if (napi_schedule_prep(&gfargrp->napi)) {
2569                gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2570                __napi_schedule(&gfargrp->napi);
2571        } else {
2572                /* Clear IEVENT, so interrupts aren't called again
2573                 * because of the packets that have already arrived.
2574                 */
2575                gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2576        }
2577        spin_unlock_irqrestore(&gfargrp->grplock, flags);
2578
2579}
2580
2581/* Interrupt Handler for Transmit complete */
2582static irqreturn_t gfar_transmit(int irq, void *grp_id)
2583{
2584        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2585        return IRQ_HANDLED;
2586}
2587
2588static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2589                           struct sk_buff *skb)
2590{
2591        struct net_device *dev = rx_queue->dev;
2592        struct gfar_private *priv = netdev_priv(dev);
2593        dma_addr_t buf;
2594
2595        buf = dma_map_single(&priv->ofdev->dev, skb->data,
2596                             priv->rx_buffer_size, DMA_FROM_DEVICE);
2597        gfar_init_rxbdp(rx_queue, bdp, buf);
2598}
2599
2600static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2601{
2602        struct gfar_private *priv = netdev_priv(dev);
2603        struct sk_buff *skb = NULL;
2604
2605        skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2606        if (!skb)
2607                return NULL;
2608
2609        gfar_align_skb(skb);
2610
2611        return skb;
2612}
2613
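/* Get an rx skb, preferring one from the recycle list over a fresh
 * allocation.
 */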
2614struct sk_buff *gfar_new_skb(struct net_device *dev)
2615{
2616        struct gfar_private *priv = netdev_priv(dev);
2617        struct sk_buff *skb = NULL;
2618
2619        skb = skb_dequeue(&priv->rx_recycle);
2620        if (!skb)
2621                skb = gfar_alloc_skb(dev);
2622
2623        return skb;
2624}
2625
2626static inline void count_errors(unsigned short status, struct net_device *dev)
2627{
2628        struct gfar_private *priv = netdev_priv(dev);
2629        struct net_device_stats *stats = &dev->stats;
2630        struct gfar_extra_stats *estats = &priv->extra_stats;
2631
2632        /* If the packet was truncated, none of the other errors matter */
2633        if (status & RXBD_TRUNCATED) {
2634                stats->rx_length_errors++;
2635
2636                estats->rx_trunc++;
2637
2638                return;
2639        }
2640        /* Count the errors, if there were any */
2641        if (status & (RXBD_LARGE | RXBD_SHORT)) {
2642                stats->rx_length_errors++;
2643
2644                if (status & RXBD_LARGE)
2645                        estats->rx_large++;
2646                else
2647                        estats->rx_short++;
2648        }
2649        if (status & RXBD_NONOCTET) {
2650                stats->rx_frame_errors++;
2651                estats->rx_nonoctet++;
2652        }
2653        if (status & RXBD_CRCERR) {
2654                estats->rx_crcerr++;
2655                stats->rx_crc_errors++;
2656        }
2657        if (status & RXBD_OVERRUN) {
2658                estats->rx_overrun++;
2659                stats->rx_crc_errors++;
2660        }
2661}
2662
2663irqreturn_t gfar_receive(int irq, void *grp_id)
2664{
2665        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2666        return IRQ_HANDLED;
2667}
2668
2669static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2670{
2671        /* If valid headers were found and valid sums were verified,
2672         * tell the kernel that no checksumming is necessary.
2673         * Otherwise, the stack must verify the checksum itself.
2674         */
2675        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2676                skb->ip_summed = CHECKSUM_UNNECESSARY;
2677        else
2678                skb_checksum_none_assert(skb);
2679}
2680
2681
2682/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2683static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2684                              int amount_pull, struct napi_struct *napi)
2685{
2686        struct gfar_private *priv = netdev_priv(dev);
2687        struct rxfcb *fcb = NULL;
2688
2689        gro_result_t ret;
2690
2691        /* The FCB, if present, is at the beginning of the frame */
2692        fcb = (struct rxfcb *)skb->data;
2693
2694        /* Remove the FCB from the skb; the padding
2695         * bytes, if any, are pulled off further below.
2696         */
2697        if (amount_pull) {
2698                skb_record_rx_queue(skb, fcb->rq);
2699                skb_pull(skb, amount_pull);
2700        }
2701
2702        /* Get receive timestamp from the skb */
2703        if (priv->hwts_rx_en) {
2704                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2705                u64 *ns = (u64 *) skb->data;
2706
2707                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2708                shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2709        }
2710
2711        if (priv->padding)
2712                skb_pull(skb, priv->padding);
2713
2714        if (dev->features & NETIF_F_RXCSUM)
2715                gfar_rx_checksum(skb, fcb);
2716
2717        /* Tell the skb what kind of packet this is */
2718        skb->protocol = eth_type_trans(skb, dev);
2719
2720        /* We need to check for NETIF_F_HW_VLAN_RX here.
2721         * Even if vlan rx acceleration is disabled, on some chips
2722         * RXFCB_VLN is set pseudo-randomly.
2723         */
2724        if (dev->features & NETIF_F_HW_VLAN_RX &&
2725            fcb->flags & RXFCB_VLN)
2726                __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2727
2728        /* Send the packet up the stack */
2729        ret = napi_gro_receive(napi, skb);
2730
2731        if (GRO_DROP == ret)
2732                priv->extra_stats.kernel_dropped++;
2733
2734        return 0;
2735}
2736
2737/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2738 * until the budget/quota has been reached. Returns the number
2739 * of frames handled
2740 */
2741int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2742{
2743        struct net_device *dev = rx_queue->dev;
2744        struct rxbd8 *bdp, *base;
2745        struct sk_buff *skb;
2746        int pkt_len;
2747        int amount_pull;
2748        int howmany = 0;
2749        struct gfar_private *priv = netdev_priv(dev);
2750
2751        /* Get the first full descriptor */
2752        bdp = rx_queue->cur_rx;
2753        base = rx_queue->rx_bd_base;
2754
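        /* If the controller prepends a frame control block (FCB) to each
         * received frame, it must be pulled off before the frame is handed
         * to the stack.
         */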
2755        amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2756
2757        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2758                struct sk_buff *newskb;
2759
2760                rmb();
2761
2762                /* Add another skb for the future */
2763                newskb = gfar_new_skb(dev);
2764
2765                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2766
2767                dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2768                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
2769
2770                if (unlikely(!(bdp->status & RXBD_ERR) &&
2771                             bdp->length > priv->rx_buffer_size))
2772                        bdp->status = RXBD_LARGE;
2773
2774                /* We drop the frame if we failed to allocate a new buffer */
2775                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2776                             bdp->status & RXBD_ERR)) {
2777                        count_errors(bdp->status, dev);
2778
2779                        if (unlikely(!newskb))
2780                                newskb = skb;
2781                        else if (skb)
2782                                skb_queue_head(&priv->rx_recycle, skb);
2783                } else {
2784                        /* Increment the number of packets */
2785                        rx_queue->stats.rx_packets++;
2786                        howmany++;
2787
2788                        if (likely(skb)) {
2789                                /* Remove the FCS from the packet length */
2790                                pkt_len = bdp->length - ETH_FCS_LEN;
2791                                skb_put(skb, pkt_len);
2792                                rx_queue->stats.rx_bytes += pkt_len;
2793                                skb_record_rx_queue(skb, rx_queue->qindex);
2794                                gfar_process_frame(dev, skb, amount_pull,
2795                                                   &rx_queue->grp->napi);
2796
2797                        } else {
2798                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
2799                                rx_queue->stats.rx_dropped++;
2800                                priv->extra_stats.rx_skbmissing++;
2801                        }
2802
2803                }
2804
2805                rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2806
2807                /* Setup the new bdp */
2808                gfar_new_rxbdp(rx_queue, bdp, newskb);
2809
2810                /* Update to the next pointer */
2811                bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2812
2813                /* update to point at the next skb */
2814                rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2815                                      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2816        }
2817
2818        /* Update the current rxbd pointer to be the next one */
2819        rx_queue->cur_rx = bdp;
2820
2821        return howmany;
2822}
2823
2824static int gfar_poll(struct napi_struct *napi, int budget)
2825{
2826        struct gfar_priv_grp *gfargrp =
2827                container_of(napi, struct gfar_priv_grp, napi);
2828        struct gfar_private *priv = gfargrp->priv;
2829        struct gfar __iomem *regs = gfargrp->regs;
2830        struct gfar_priv_tx_q *tx_queue = NULL;
2831        struct gfar_priv_rx_q *rx_queue = NULL;
2832        int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2833        int tx_cleaned = 0, i, left_over_budget = budget;
2834        unsigned long serviced_queues = 0;
2835        int num_queues = 0;
2836
2837        num_queues = gfargrp->num_rx_queues;
2838        budget_per_queue = budget/num_queues;
2839
2840        /* Clear IEVENT, so interrupts aren't called again
2841         * because of the packets that have already arrived
2842         */
2843        gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2844
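        /* Split the budget evenly across the group's rx queues; any budget
         * a queue leaves unused is redistributed to the still-busy queues
         * on the next pass of the loop.
         */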
2845        while (num_queues && left_over_budget) {
2846                budget_per_queue = left_over_budget/num_queues;
2847                left_over_budget = 0;
2848
2849                for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2850                        if (test_bit(i, &serviced_queues))
2851                                continue;
2852                        rx_queue = priv->rx_queue[i];
2853                        tx_queue = priv->tx_queue[rx_queue->qindex];
2854
2855                        tx_cleaned += gfar_clean_tx_ring(tx_queue);
2856                        rx_cleaned_per_queue =
2857                                gfar_clean_rx_ring(rx_queue, budget_per_queue);
2858                        rx_cleaned += rx_cleaned_per_queue;
2859                        if (rx_cleaned_per_queue < budget_per_queue) {
2860                                left_over_budget = left_over_budget +
2861                                        (budget_per_queue -
2862                                         rx_cleaned_per_queue);
2863                                set_bit(i, &serviced_queues);
2864                                num_queues--;
2865                        }
2866                }
2867        }
2868
2869        if (tx_cleaned)
2870                return budget;
2871
2872        if (rx_cleaned < budget) {
2873                napi_complete(napi);
2874
2875                /* Clear the halt bit in RSTAT */
2876                gfar_write(&regs->rstat, gfargrp->rstat);
2877
2878                gfar_write(&regs->imask, IMASK_DEFAULT);
2879
2880                /* If we are coalescing interrupts, update the timer.
2881                 * Otherwise, clear it.
2882                 */
2883                gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2884                                          gfargrp->tx_bit_map);
2885        }
2886
2887        return rx_cleaned;
2888}
2889
2890#ifdef CONFIG_NET_POLL_CONTROLLER
2891/* Polling 'interrupt' - used by things like netconsole to send skbs
2892 * without having to re-enable interrupts. It's not called while
2893 * the interrupt routine is executing.
2894 */
2895static void gfar_netpoll(struct net_device *dev)
2896{
2897        struct gfar_private *priv = netdev_priv(dev);
2898        int i;
2899
2900        /* If the device has multiple interrupts, run tx/rx */
2901        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2902                for (i = 0; i < priv->num_grps; i++) {
2903                        disable_irq(priv->gfargrp[i].interruptTransmit);
2904                        disable_irq(priv->gfargrp[i].interruptReceive);
2905                        disable_irq(priv->gfargrp[i].interruptError);
2906                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2907                                       &priv->gfargrp[i]);
2908                        enable_irq(priv->gfargrp[i].interruptError);
2909                        enable_irq(priv->gfargrp[i].interruptReceive);
2910                        enable_irq(priv->gfargrp[i].interruptTransmit);
2911                }
2912        } else {
2913                for (i = 0; i < priv->num_grps; i++) {
2914                        disable_irq(priv->gfargrp[i].interruptTransmit);
2915                        gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2916                                       &priv->gfargrp[i]);
2917                        enable_irq(priv->gfargrp[i].interruptTransmit);
2918                }
2919        }
2920}
2921#endif
2922
2923/* The interrupt handler for devices with one interrupt */
2924static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2925{
2926        struct gfar_priv_grp *gfargrp = grp_id;
2927
2928        /* Save ievent for future reference */
2929        u32 events = gfar_read(&gfargrp->regs->ievent);
2930
2931        /* Check for reception */
2932        if (events & IEVENT_RX_MASK)
2933                gfar_receive(irq, grp_id);
2934
2935        /* Check for transmit completion */
2936        if (events & IEVENT_TX_MASK)
2937                gfar_transmit(irq, grp_id);
2938
2939        /* Check for errors */
2940        if (events & IEVENT_ERR_MASK)
2941                gfar_error(irq, grp_id);
2942
2943        return IRQ_HANDLED;
2944}
2945
2946/* Called every time the controller might need to be made
2947 * aware of new link state.  The PHY code conveys this
2948 * information through variables in the phydev structure, and this
2949 * function converts those variables into the appropriate
2950 * register values, and can bring down the device if needed.
2951 */
2952static void adjust_link(struct net_device *dev)
2953{
2954        struct gfar_private *priv = netdev_priv(dev);
2955        struct gfar __iomem *regs = priv->gfargrp[0].regs;
2956        unsigned long flags;
2957        struct phy_device *phydev = priv->phydev;
2958        int new_state = 0;
2959
2960        local_irq_save(flags);
2961        lock_tx_qs(priv);
2962
2963        if (phydev->link) {
2964                u32 tempval = gfar_read(&regs->maccfg2);
2965                u32 ecntrl = gfar_read(&regs->ecntrl);
2966
2967                /* Now we make sure that we can be in full duplex mode.
2968                 * If not, we operate in half-duplex mode.
2969                 */
2970                if (phydev->duplex != priv->oldduplex) {
2971                        new_state = 1;
2972                        if (!(phydev->duplex))
2973                                tempval &= ~(MACCFG2_FULL_DUPLEX);
2974                        else
2975                                tempval |= MACCFG2_FULL_DUPLEX;
2976
2977                        priv->oldduplex = phydev->duplex;
2978                }
2979
2980                if (phydev->speed != priv->oldspeed) {
2981                        new_state = 1;
2982                        switch (phydev->speed) {
2983                        case 1000:
2984                                tempval =
2985                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2986
2987                                ecntrl &= ~(ECNTRL_R100);
2988                                break;
2989                        case 100:
2990                        case 10:
2991                                tempval =
2992                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2993
2994                                /* Reduced mode distinguishes
2995                                 * between 10 and 100
2996                                 */
2997                                if (phydev->speed == SPEED_100)
2998                                        ecntrl |= ECNTRL_R100;
2999                                else
3000                                        ecntrl &= ~(ECNTRL_R100);
3001                                break;
3002                        default:
3003                                netif_warn(priv, link, dev,
3004                                           "Ack!  Speed (%d) is not 10/100/1000!\n",
3005                                           phydev->speed);
3006                                break;
3007                        }
3008
3009                        priv->oldspeed = phydev->speed;
3010                }
3011
3012                gfar_write(&regs->maccfg2, tempval);
3013                gfar_write(&regs->ecntrl, ecntrl);
3014
3015                if (!priv->oldlink) {
3016                        new_state = 1;
3017                        priv->oldlink = 1;
3018                }
3019        } else if (priv->oldlink) {
3020                new_state = 1;
3021                priv->oldlink = 0;
3022                priv->oldspeed = 0;
3023                priv->oldduplex = -1;
3024        }
3025
3026        if (new_state && netif_msg_link(priv))
3027                phy_print_status(phydev);
3028        unlock_tx_qs(priv);
3029        local_irq_restore(flags);
3030}
3031
3032/* Update the hash table based on the current list of multicast
3033 * addresses we subscribe to.  Also, change the promiscuity of
3034 * the device based on the flags (this function is called
3035 * whenever dev->flags is changed).
3036 */
3037static void gfar_set_multi(struct net_device *dev)
3038{
3039        struct netdev_hw_addr *ha;
3040        struct gfar_private *priv = netdev_priv(dev);
3041        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3042        u32 tempval;
3043
3044        if (dev->flags & IFF_PROMISC) {
3045                /* Set RCTRL to PROM */
3046                tempval = gfar_read(&regs->rctrl);
3047                tempval |= RCTRL_PROM;
3048                gfar_write(&regs->rctrl, tempval);
3049        } else {
3050                /* Set RCTRL to not PROM */
3051                tempval = gfar_read(&regs->rctrl);
3052                tempval &= ~(RCTRL_PROM);
3053                gfar_write(&regs->rctrl, tempval);
3054        }
3055
3056        if (dev->flags & IFF_ALLMULTI) {
3057                /* Set the hash to rx all multicast frames */
3058                gfar_write(&regs->igaddr0, 0xffffffff);
3059                gfar_write(&regs->igaddr1, 0xffffffff);
3060                gfar_write(&regs->igaddr2, 0xffffffff);
3061                gfar_write(&regs->igaddr3, 0xffffffff);
3062                gfar_write(&regs->igaddr4, 0xffffffff);
3063                gfar_write(&regs->igaddr5, 0xffffffff);
3064                gfar_write(&regs->igaddr6, 0xffffffff);
3065                gfar_write(&regs->igaddr7, 0xffffffff);
3066                gfar_write(&regs->gaddr0, 0xffffffff);
3067                gfar_write(&regs->gaddr1, 0xffffffff);
3068                gfar_write(&regs->gaddr2, 0xffffffff);
3069                gfar_write(&regs->gaddr3, 0xffffffff);
3070                gfar_write(&regs->gaddr4, 0xffffffff);
3071                gfar_write(&regs->gaddr5, 0xffffffff);
3072                gfar_write(&regs->gaddr6, 0xffffffff);
3073                gfar_write(&regs->gaddr7, 0xffffffff);
3074        } else {
3075                int em_num;
3076                int idx;
3077
3078                /* zero out the hash */
3079                gfar_write(&regs->igaddr0, 0x0);
3080                gfar_write(&regs->igaddr1, 0x0);
3081                gfar_write(&regs->igaddr2, 0x0);
3082                gfar_write(&regs->igaddr3, 0x0);
3083                gfar_write(&regs->igaddr4, 0x0);
3084                gfar_write(&regs->igaddr5, 0x0);
3085                gfar_write(&regs->igaddr6, 0x0);
3086                gfar_write(&regs->igaddr7, 0x0);
3087                gfar_write(&regs->gaddr0, 0x0);
3088                gfar_write(&regs->gaddr1, 0x0);
3089                gfar_write(&regs->gaddr2, 0x0);
3090                gfar_write(&regs->gaddr3, 0x0);
3091                gfar_write(&regs->gaddr4, 0x0);
3092                gfar_write(&regs->gaddr5, 0x0);
3093                gfar_write(&regs->gaddr6, 0x0);
3094                gfar_write(&regs->gaddr7, 0x0);
3095
3096                /* If we have extended hash tables, we need to
3097                 * clear the exact match registers to prepare for
3098                 * setting them
3099                 */
3100                if (priv->extended_hash) {
3101                        em_num = GFAR_EM_NUM + 1;
3102                        gfar_clear_exact_match(dev);
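                            /* Exact-match entry 0 holds the station's own
                             * MAC address, so multicast matching starts at
                             * entry 1.
                             */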
3103                        idx = 1;
3104                } else {
3105                        idx = 0;
3106                        em_num = 0;
3107                }
3108
3109                if (netdev_mc_empty(dev))
3110                        return;
3111
3112                /* Parse the list, and set the appropriate bits */
3113                netdev_for_each_mc_addr(ha, dev) {
3114                        if (idx < em_num) {
3115                                gfar_set_mac_for_addr(dev, idx, ha->addr);
3116                                idx++;
3117                        } else
3118                                gfar_set_hash_for_addr(dev, ha->addr);
3119                }
3120        }
3121}
3122
3123
3124/* Clears each of the exact match registers to zero, so they
3125 * don't interfere with normal reception
3126 */
3127static void gfar_clear_exact_match(struct net_device *dev)
3128{
3129        int idx;
3130        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3131
3132        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3133                gfar_set_mac_for_addr(dev, idx, zero_arr);
3134}
3135
3136/* Set the appropriate hash bit for the given addr */
3137/* The algorithm works like so:
3138 * 1) Take the Destination Address (i.e. the multicast address), and
3139 * do a CRC on it (little endian), and reverse the bits of the
3140 * result.
3141 * 2) Use the 8 most significant bits as a hash into a 256-entry
3142 * table.  The table is controlled through 8 32-bit registers:
3143 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3144 * entry 255.  This means that the 3 most significant bits of the
3145 * hash index select which gaddr register to use, and the other 5 bits
3146 * indicate which bit (assuming an IBM numbering scheme, which
3147 * for PowerPC (tm) is usually the case) in the register holds
3148 * the entry.
3149 */
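    /* Worked example, assuming the full 8-bit hash width: if the top byte of
     * the bit-reversed CRC is 0xb4 (1011 0100b), the top three bits (101b = 5)
     * select gaddr5, and the remaining five bits (1 0100b = 20) select bit 20
     * in IBM numbering, i.e. bit (31 - 20) = 11 counting from the LSB.
     */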
3150static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3151{
3152        u32 tempval;
3153        struct gfar_private *priv = netdev_priv(dev);
3154        u32 result = ether_crc(ETH_ALEN, addr);
3155        int width = priv->hash_width;
3156        u8 whichbit = (result >> (32 - width)) & 0x1f;
3157        u8 whichreg = result >> (32 - width + 5);
3158        u32 value = (1 << (31-whichbit));
3159
3160        tempval = gfar_read(priv->hash_regs[whichreg]);
3161        tempval |= value;
3162        gfar_write(priv->hash_regs[whichreg], tempval);
3163}
3164
3165
3166/* There are multiple MAC address register pairs on some controllers.
3167 * This function sets the num'th pair to the given address.
3168 */
3169static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3170                                  const u8 *addr)
3171{
3172        struct gfar_private *priv = netdev_priv(dev);
3173        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3174        int idx;
3175        char tmpbuf[2 * sizeof(u32)] = { 0 };	/* zero-padded for the two u32 reads below */
3176        u32 tempval;
3177        u32 __iomem *macptr = &regs->macstnaddr1;
3178
3179        macptr += num*2;
3180
3181        /* Now copy it into the mac registers backwards, since the
3182         * controller expects the address bytes in reverse order
3183         */
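            /* For example, on a big-endian CPU (the usual case for these
             * SoCs), address 12:34:56:78:AB:CD is written as 0xCDAB7856 into
             * the first register of the pair and 0x34120000 into the second
             * (the low half-word is zero padding).
             */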
3184        for (idx = 0; idx < ETH_ALEN; idx++)
3185                tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3186
3187        gfar_write(macptr, *((u32 *) (tmpbuf)));
3188
3189        tempval = *((u32 *) (tmpbuf + 4));
3190
3191        gfar_write(macptr+1, tempval);
3192}
3193
3194/* GFAR error interrupt handler */
3195static irqreturn_t gfar_error(int irq, void *grp_id)
3196{
3197        struct gfar_priv_grp *gfargrp = grp_id;
3198        struct gfar __iomem *regs = gfargrp->regs;
3199        struct gfar_private *priv = gfargrp->priv;
3200        struct net_device *dev = priv->ndev;
3201
3202        /* Save ievent for future reference */
3203        u32 events = gfar_read(&regs->ievent);
3204
3205        /* Clear IEVENT */
3206        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3207
3208        /* Magic Packet is not an error. */
3209        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3210            (events & IEVENT_MAG))
3211                events &= ~IEVENT_MAG;
3212
3213        /* Dump the error event when rx/tx error debugging is enabled */
3214        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3215                netdev_dbg(dev,
3216                           "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3217                           events, gfar_read(&regs->imask));
3218
3219        /* Update the error counters */
3220        if (events & IEVENT_TXE) {
3221                dev->stats.tx_errors++;
3222
3223                if (events & IEVENT_LC)
3224                        dev->stats.tx_window_errors++;
3225                if (events & IEVENT_CRL)
3226                        dev->stats.tx_aborted_errors++;
3227                if (events & IEVENT_XFUN) {
3228                        unsigned long flags;
3229
3230                        netif_dbg(priv, tx_err, dev,
3231                                  "TX FIFO underrun, packet dropped\n");
3232                        dev->stats.tx_dropped++;
3233                        priv->extra_stats.tx_underrun++;
3234
3235                        local_irq_save(flags);
3236                        lock_tx_qs(priv);
3237
3238                        /* Reactivate the Tx Queues */
3239                        gfar_write(&regs->tstat, gfargrp->tstat);
3240
3241                        unlock_tx_qs(priv);
3242                        local_irq_restore(flags);
3243                }
3244                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3245        }
3246        if (events & IEVENT_BSY) {
3247                dev->stats.rx_errors++;
3248                priv->extra_stats.rx_bsy++;
3249
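                    /* BSY means a frame was dropped because no rx buffers
                     * were available; kick rx processing so the ring gets
                     * cleaned and refilled.
                     */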
3250                gfar_receive(irq, grp_id);
3251
3252                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3253                          gfar_read(&regs->rstat));
3254        }
3255        if (events & IEVENT_BABR) {
3256                dev->stats.rx_errors++;
3257                priv->extra_stats.rx_babr++;
3258
3259                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3260        }
3261        if (events & IEVENT_EBERR) {
3262                priv->extra_stats.eberr++;
3263                netif_dbg(priv, rx_err, dev, "bus error\n");
3264        }
3265        if (events & IEVENT_RXC)
3266                netif_dbg(priv, rx_status, dev, "control frame\n");
3267
3268        if (events & IEVENT_BABT) {
3269                priv->extra_stats.tx_babt++;
3270                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3271        }
3272        return IRQ_HANDLED;
3273}
3274
3275static struct of_device_id gfar_match[] =
3276{
3277        {
3278                .type = "network",
3279                .compatible = "gianfar",
3280        },
3281        {
3282                .compatible = "fsl,etsec2",
3283        },
3284        {},
3285};
3286MODULE_DEVICE_TABLE(of, gfar_match);
3287
3288/* Structure for a device driver */
3289static struct platform_driver gfar_driver = {
3290        .driver = {
3291                .name = "fsl-gianfar",
3292                .owner = THIS_MODULE,
3293                .pm = GFAR_PM_OPS,
3294                .of_match_table = gfar_match,
3295        },
3296        .probe = gfar_probe,
3297        .remove = gfar_remove,
3298};
3299
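    /* module_platform_driver() expands to the standard module_init()/
     * module_exit() pair that registers and unregisters gfar_driver with the
     * platform bus.
     */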
3300module_platform_driver(gfar_driver);
3301