linux/drivers/net/ethernet/freescale/gianfar.c
   1/* drivers/net/ethernet/freescale/gianfar.c
   2 *
   3 * Gianfar Ethernet Driver
   4 * This driver is designed for the non-CPM ethernet controllers
   5 * on the 85xx and 83xx family of integrated processors
   6 * Based on 8260_io/fcc_enet.c
   7 *
   8 * Author: Andy Fleming
   9 * Maintainer: Kumar Gala
  10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  11 *
  12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  13 * Copyright 2007 MontaVista Software, Inc.
  14 *
  15 * This program is free software; you can redistribute  it and/or modify it
  16 * under  the terms of  the GNU General  Public License as published by the
  17 * Free Software Foundation;  either version 2 of the  License, or (at your
  18 * option) any later version.
  19 *
  20 *  Gianfar:  AKA Lambda Draconis, "Dragon"
  21 *  RA 11 31 24.2
  22 *  Dec +69 19 52
  23 *  V 3.84
  24 *  B-V +1.62
  25 *
  26 *  Theory of operation
  27 *
  28 *  The driver is initialized through of_device. Configuration information
  29 *  is therefore conveyed through an OF-style device tree.
  30 *
  31 *  The Gianfar Ethernet Controller uses a ring of buffer
  32 *  descriptors.  The beginning is indicated by a register
  33 *  pointing to the physical address of the start of the ring.
  34 *  The end is determined by a "wrap" bit being set in the
  35 *  last descriptor of the ring.
  36 *
  37 *  When a packet is received, the RXF bit in the
  38 *  IEVENT register is set, triggering an interrupt when the
  39 *  corresponding bit in the IMASK register is also set (if
  40 *  interrupt coalescing is active, then the interrupt may not
  41 *  happen immediately, but will wait until either a set number
  42 *  of frames or amount of time have passed).  In NAPI, the
  43 *  interrupt handler will signal there is work to be done, and
  44 *  exit. This method will start at the last known empty
  45 *  descriptor, and process every subsequent descriptor until there
  46 *  are none left with data (NAPI will stop after a set number of
  47 *  packets to give time to other tasks, but will eventually
  48 *  process all the packets).  The data arrives inside a
  49 *  pre-allocated skb, and so after the skb is passed up to the
  50 *  stack, a new skb must be allocated, and the address field in
  51 *  the buffer descriptor must be updated to indicate this new
  52 *  skb.
  53 *
  54 *  When the kernel requests that a packet be transmitted, the
  55 *  driver starts where it left off last time, and points the
  56 *  descriptor at the buffer which was passed in.  The driver
  57 *  then informs the DMA engine that there are packets ready to
  58 *  be transmitted.  Once the controller is finished transmitting
  59 *  the packet, an interrupt may be triggered (under the same
  60 *  conditions as for reception, but depending on the TXF bit).
  61 *  The driver then cleans up the buffer.
  62 */
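
/* An illustrative sketch (not part of the driver) of the RX clean loop
 * described above, using the rxbd8 fields and flags from gianfar.h.
 * "deliver_to_stack" and "refill_skb" are placeholders for the real
 * helpers used further down in this file (gfar_process_frame(),
 * gfar_new_skb()/gfar_new_rxbdp()).
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY) && budget--) {
 *		deliver_to_stack(rx_queue, bdp);  // pass the filled skb to NAPI
 *		refill_skb(rx_queue, bdp);        // allocate and map a fresh skb
 *		bdp->status |= RXBD_EMPTY;        // hand the BD back to hardware
 *		if (bdp->status & RXBD_WRAP)      // wrap bit marks the last BD
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;
 */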
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65#define DEBUG
  66
  67#include <linux/kernel.h>
  68#include <linux/string.h>
  69#include <linux/errno.h>
  70#include <linux/unistd.h>
  71#include <linux/slab.h>
  72#include <linux/interrupt.h>
  73#include <linux/delay.h>
  74#include <linux/netdevice.h>
  75#include <linux/etherdevice.h>
  76#include <linux/skbuff.h>
  77#include <linux/if_vlan.h>
  78#include <linux/spinlock.h>
  79#include <linux/mm.h>
  80#include <linux/of_address.h>
  81#include <linux/of_irq.h>
  82#include <linux/of_mdio.h>
  83#include <linux/of_platform.h>
  84#include <linux/ip.h>
  85#include <linux/tcp.h>
  86#include <linux/udp.h>
  87#include <linux/in.h>
  88#include <linux/net_tstamp.h>
  89
  90#include <asm/io.h>
  91#include <asm/reg.h>
  92#include <asm/mpc85xx.h>
  93#include <asm/irq.h>
  94#include <asm/uaccess.h>
  95#include <linux/module.h>
  96#include <linux/dma-mapping.h>
  97#include <linux/crc32.h>
  98#include <linux/mii.h>
  99#include <linux/phy.h>
 100#include <linux/phy_fixed.h>
 101#include <linux/of.h>
 102#include <linux/of_net.h>
 103
 104#include "gianfar.h"
 105
 106#define TX_TIMEOUT      (1*HZ)
 107
 108const char gfar_driver_version[] = "1.3";
 109
 110static int gfar_enet_open(struct net_device *dev);
 111static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 112static void gfar_reset_task(struct work_struct *work);
 113static void gfar_timeout(struct net_device *dev);
 114static int gfar_close(struct net_device *dev);
 115struct sk_buff *gfar_new_skb(struct net_device *dev);
 116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 117                           struct sk_buff *skb);
 118static int gfar_set_mac_address(struct net_device *dev);
 119static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 120static irqreturn_t gfar_error(int irq, void *dev_id);
 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 123static void adjust_link(struct net_device *dev);
 124static noinline void gfar_update_link_state(struct gfar_private *priv);
 125static int init_phy(struct net_device *dev);
 126static int gfar_probe(struct platform_device *ofdev);
 127static int gfar_remove(struct platform_device *ofdev);
 128static void free_skb_resources(struct gfar_private *priv);
 129static void gfar_set_multi(struct net_device *dev);
 130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 131static void gfar_configure_serdes(struct net_device *dev);
 132static int gfar_poll_rx(struct napi_struct *napi, int budget);
 133static int gfar_poll_tx(struct napi_struct *napi, int budget);
 134static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
 135static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
 136#ifdef CONFIG_NET_POLL_CONTROLLER
 137static void gfar_netpoll(struct net_device *dev);
 138#endif
 139int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 140static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 141static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 142                               int amount_pull, struct napi_struct *napi);
 143static void gfar_halt_nodisable(struct gfar_private *priv);
 144static void gfar_clear_exact_match(struct net_device *dev);
 145static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 146                                  const u8 *addr);
 147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 148
 149MODULE_AUTHOR("Freescale Semiconductor, Inc");
 150MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 151MODULE_LICENSE("GPL");
 152
 153static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 154                            dma_addr_t buf)
 155{
 156        u32 lstatus;
 157
 158        bdp->bufPtr = buf;
 159
 160        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 161        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 162                lstatus |= BD_LFLAG(RXBD_WRAP);
 163
 164        eieio();
 165
 166        bdp->lstatus = lstatus;
 167}
 168
 169static int gfar_init_bds(struct net_device *ndev)
 170{
 171        struct gfar_private *priv = netdev_priv(ndev);
 172        struct gfar_priv_tx_q *tx_queue = NULL;
 173        struct gfar_priv_rx_q *rx_queue = NULL;
 174        struct txbd8 *txbdp;
 175        struct rxbd8 *rxbdp;
 176        int i, j;
 177
 178        for (i = 0; i < priv->num_tx_queues; i++) {
 179                tx_queue = priv->tx_queue[i];
 180                /* Initialize some variables in our dev structure */
 181                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
 182                tx_queue->dirty_tx = tx_queue->tx_bd_base;
 183                tx_queue->cur_tx = tx_queue->tx_bd_base;
 184                tx_queue->skb_curtx = 0;
 185                tx_queue->skb_dirtytx = 0;
 186
 187                /* Initialize Transmit Descriptor Ring */
 188                txbdp = tx_queue->tx_bd_base;
 189                for (j = 0; j < tx_queue->tx_ring_size; j++) {
 190                        txbdp->lstatus = 0;
 191                        txbdp->bufPtr = 0;
 192                        txbdp++;
 193                }
 194
 195                /* Set the last descriptor in the ring to indicate wrap */
 196                txbdp--;
 197                txbdp->status |= TXBD_WRAP;
 198        }
 199
 200        for (i = 0; i < priv->num_rx_queues; i++) {
 201                rx_queue = priv->rx_queue[i];
 202                rx_queue->cur_rx = rx_queue->rx_bd_base;
 203                rx_queue->skb_currx = 0;
 204                rxbdp = rx_queue->rx_bd_base;
 205
 206                for (j = 0; j < rx_queue->rx_ring_size; j++) {
 207                        struct sk_buff *skb = rx_queue->rx_skbuff[j];
 208
 209                        if (skb) {
 210                                gfar_init_rxbdp(rx_queue, rxbdp,
 211                                                rxbdp->bufPtr);
 212                        } else {
 213                                skb = gfar_new_skb(ndev);
 214                                if (!skb) {
 215                                        netdev_err(ndev, "Can't allocate RX buffers\n");
 216                                        return -ENOMEM;
 217                                }
 218                                rx_queue->rx_skbuff[j] = skb;
 219
 220                                gfar_new_rxbdp(rx_queue, rxbdp, skb);
 221                        }
 222
 223                        rxbdp++;
 224                }
 225
 226        }
 227
 228        return 0;
 229}
 230
 231static int gfar_alloc_skb_resources(struct net_device *ndev)
 232{
 233        void *vaddr;
 234        dma_addr_t addr;
 235        int i, j, k;
 236        struct gfar_private *priv = netdev_priv(ndev);
 237        struct device *dev = priv->dev;
 238        struct gfar_priv_tx_q *tx_queue = NULL;
 239        struct gfar_priv_rx_q *rx_queue = NULL;
 240
 241        priv->total_tx_ring_size = 0;
 242        for (i = 0; i < priv->num_tx_queues; i++)
 243                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
 244
 245        priv->total_rx_ring_size = 0;
 246        for (i = 0; i < priv->num_rx_queues; i++)
 247                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
 248
 249        /* Allocate memory for the buffer descriptors */
 250        vaddr = dma_alloc_coherent(dev,
 251                                   (priv->total_tx_ring_size *
 252                                    sizeof(struct txbd8)) +
 253                                   (priv->total_rx_ring_size *
 254                                    sizeof(struct rxbd8)),
 255                                   &addr, GFP_KERNEL);
 256        if (!vaddr)
 257                return -ENOMEM;
 258
 259        for (i = 0; i < priv->num_tx_queues; i++) {
 260                tx_queue = priv->tx_queue[i];
 261                tx_queue->tx_bd_base = vaddr;
 262                tx_queue->tx_bd_dma_base = addr;
 263                tx_queue->dev = ndev;
 264                /* enet DMA only understands physical addresses */
 265                addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 266                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
 267        }
 268
 269        /* Start the rx descriptor ring where the tx ring leaves off */
 270        for (i = 0; i < priv->num_rx_queues; i++) {
 271                rx_queue = priv->rx_queue[i];
 272                rx_queue->rx_bd_base = vaddr;
 273                rx_queue->rx_bd_dma_base = addr;
 274                rx_queue->dev = ndev;
 275                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 276                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 277        }
 278
 279        /* Setup the skbuff rings */
 280        for (i = 0; i < priv->num_tx_queues; i++) {
 281                tx_queue = priv->tx_queue[i];
 282                tx_queue->tx_skbuff =
 283                        kmalloc_array(tx_queue->tx_ring_size,
 284                                      sizeof(*tx_queue->tx_skbuff),
 285                                      GFP_KERNEL);
 286                if (!tx_queue->tx_skbuff)
 287                        goto cleanup;
 288
 289                for (k = 0; k < tx_queue->tx_ring_size; k++)
 290                        tx_queue->tx_skbuff[k] = NULL;
 291        }
 292
 293        for (i = 0; i < priv->num_rx_queues; i++) {
 294                rx_queue = priv->rx_queue[i];
 295                rx_queue->rx_skbuff =
 296                        kmalloc_array(rx_queue->rx_ring_size,
 297                                      sizeof(*rx_queue->rx_skbuff),
 298                                      GFP_KERNEL);
 299                if (!rx_queue->rx_skbuff)
 300                        goto cleanup;
 301
 302                for (j = 0; j < rx_queue->rx_ring_size; j++)
 303                        rx_queue->rx_skbuff[j] = NULL;
 304        }
 305
 306        if (gfar_init_bds(ndev))
 307                goto cleanup;
 308
 309        return 0;
 310
 311cleanup:
 312        free_skb_resources(priv);
 313        return -ENOMEM;
 314}
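
/* Layout of the single coherent allocation made above: all TX rings are
 * laid out back to back, followed by all RX rings, so a single
 * dma_alloc_coherent() call covers every buffer descriptor used by the
 * device:
 *
 *	[txbd8 ring, tx q0][txbd8 ring, tx q1]...[rxbd8 ring, rx q0]...
 */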
 315
 316static void gfar_init_tx_rx_base(struct gfar_private *priv)
 317{
 318        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 319        u32 __iomem *baddr;
 320        int i;
 321
 322        baddr = &regs->tbase0;
 323        for (i = 0; i < priv->num_tx_queues; i++) {
 324                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
 325                baddr += 2;
 326        }
 327
 328        baddr = &regs->rbase0;
 329        for (i = 0; i < priv->num_rx_queues; i++) {
 330                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
 331                baddr += 2;
 332        }
 333}
 334
 335static void gfar_rx_buff_size_config(struct gfar_private *priv)
 336{
 337        int frame_size = priv->ndev->mtu + ETH_HLEN;
 338
 339        /* set this when rx hw offload (TOE) functions are being used */
 340        priv->uses_rxfcb = 0;
 341
 342        if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
 343                priv->uses_rxfcb = 1;
 344
 345        if (priv->hwts_rx_en)
 346                priv->uses_rxfcb = 1;
 347
 348        if (priv->uses_rxfcb)
 349                frame_size += GMAC_FCB_LEN;
 350
 351        frame_size += priv->padding;
 352
 353        frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
 354                     INCREMENTAL_BUFFER_SIZE;
 355
 356        priv->rx_buffer_size = frame_size;
 357}
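
/* Worked example for the rounding above (assuming INCREMENTAL_BUFFER_SIZE
 * is 512): a 1500 byte MTU with no FCB and no padding gives a frame size
 * of 1500 + ETH_HLEN = 1514, which rounds up to a 1536 byte RX buffer,
 * i.e. DEFAULT_RX_BUFFER_SIZE.
 */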
 358
 359static void gfar_mac_rx_config(struct gfar_private *priv)
 360{
 361        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 362        u32 rctrl = 0;
 363
 364        if (priv->rx_filer_enable) {
 365                rctrl |= RCTRL_FILREN;
 366                /* Program the RIR0 reg with the required distribution */
 367                if (priv->poll_mode == GFAR_SQ_POLLING)
 368                        gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
 369                else /* GFAR_MQ_POLLING */
 370                        gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
 371        }
 372
 373        /* Restore PROMISC mode */
 374        if (priv->ndev->flags & IFF_PROMISC)
 375                rctrl |= RCTRL_PROM;
 376
 377        if (priv->ndev->features & NETIF_F_RXCSUM)
 378                rctrl |= RCTRL_CHECKSUMMING;
 379
 380        if (priv->extended_hash)
 381                rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
 382
 383        if (priv->padding) {
 384                rctrl &= ~RCTRL_PAL_MASK;
 385                rctrl |= RCTRL_PADDING(priv->padding);
 386        }
 387
 388        /* Enable HW time stamping if requested from user space */
 389        if (priv->hwts_rx_en)
 390                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
 391
 392        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 393                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
 394
 395        /* Init rctrl based on our settings */
 396        gfar_write(&regs->rctrl, rctrl);
 397}
 398
 399static void gfar_mac_tx_config(struct gfar_private *priv)
 400{
 401        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 402        u32 tctrl = 0;
 403
 404        if (priv->ndev->features & NETIF_F_IP_CSUM)
 405                tctrl |= TCTRL_INIT_CSUM;
 406
 407        if (priv->prio_sched_en)
 408                tctrl |= TCTRL_TXSCHED_PRIO;
 409        else {
 410                tctrl |= TCTRL_TXSCHED_WRRS;
 411                gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
 412                gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
 413        }
 414
 415        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
 416                tctrl |= TCTRL_VLINS;
 417
 418        gfar_write(&regs->tctrl, tctrl);
 419}
 420
 421static void gfar_configure_coalescing(struct gfar_private *priv,
 422                               unsigned long tx_mask, unsigned long rx_mask)
 423{
 424        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 425        u32 __iomem *baddr;
 426
 427        if (priv->mode == MQ_MG_MODE) {
 428                int i = 0;
 429
 430                baddr = &regs->txic0;
 431                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
 432                        gfar_write(baddr + i, 0);
 433                        if (likely(priv->tx_queue[i]->txcoalescing))
 434                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
 435                }
 436
 437                baddr = &regs->rxic0;
 438                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
 439                        gfar_write(baddr + i, 0);
 440                        if (likely(priv->rx_queue[i]->rxcoalescing))
 441                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
 442                }
 443        } else {
 444                /* Backward compatible case -- even if we enable
  445                 * multiple queues, there's only a single reg to program
 446                 */
 447                gfar_write(&regs->txic, 0);
 448                if (likely(priv->tx_queue[0]->txcoalescing))
 449                        gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 450
 451                gfar_write(&regs->rxic, 0);
 452                if (unlikely(priv->rx_queue[0]->rxcoalescing))
 453                        gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
 454        }
 455}
 456
 457void gfar_configure_coalescing_all(struct gfar_private *priv)
 458{
 459        gfar_configure_coalescing(priv, 0xFF, 0xFF);
 460}
 461
 462static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 463{
 464        struct gfar_private *priv = netdev_priv(dev);
 465        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 466        unsigned long tx_packets = 0, tx_bytes = 0;
 467        int i;
 468
 469        for (i = 0; i < priv->num_rx_queues; i++) {
 470                rx_packets += priv->rx_queue[i]->stats.rx_packets;
 471                rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
 472                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
 473        }
 474
 475        dev->stats.rx_packets = rx_packets;
 476        dev->stats.rx_bytes   = rx_bytes;
 477        dev->stats.rx_dropped = rx_dropped;
 478
 479        for (i = 0; i < priv->num_tx_queues; i++) {
 480                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
 481                tx_packets += priv->tx_queue[i]->stats.tx_packets;
 482        }
 483
 484        dev->stats.tx_bytes   = tx_bytes;
 485        dev->stats.tx_packets = tx_packets;
 486
 487        return &dev->stats;
 488}
 489
 490static const struct net_device_ops gfar_netdev_ops = {
 491        .ndo_open = gfar_enet_open,
 492        .ndo_start_xmit = gfar_start_xmit,
 493        .ndo_stop = gfar_close,
 494        .ndo_change_mtu = gfar_change_mtu,
 495        .ndo_set_features = gfar_set_features,
 496        .ndo_set_rx_mode = gfar_set_multi,
 497        .ndo_tx_timeout = gfar_timeout,
 498        .ndo_do_ioctl = gfar_ioctl,
 499        .ndo_get_stats = gfar_get_stats,
 500        .ndo_set_mac_address = eth_mac_addr,
 501        .ndo_validate_addr = eth_validate_addr,
 502#ifdef CONFIG_NET_POLL_CONTROLLER
 503        .ndo_poll_controller = gfar_netpoll,
 504#endif
 505};
 506
 507static void gfar_ints_disable(struct gfar_private *priv)
 508{
 509        int i;
 510        for (i = 0; i < priv->num_grps; i++) {
 511                struct gfar __iomem *regs = priv->gfargrp[i].regs;
 512                /* Clear IEVENT */
 513                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 514
 515                /* Initialize IMASK */
 516                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 517        }
 518}
 519
 520static void gfar_ints_enable(struct gfar_private *priv)
 521{
 522        int i;
 523        for (i = 0; i < priv->num_grps; i++) {
 524                struct gfar __iomem *regs = priv->gfargrp[i].regs;
 525                /* Unmask the interrupts we look for */
 526                gfar_write(&regs->imask, IMASK_DEFAULT);
 527        }
 528}
 529
 530void lock_tx_qs(struct gfar_private *priv)
 531{
 532        int i;
 533
 534        for (i = 0; i < priv->num_tx_queues; i++)
 535                spin_lock(&priv->tx_queue[i]->txlock);
 536}
 537
 538void unlock_tx_qs(struct gfar_private *priv)
 539{
 540        int i;
 541
 542        for (i = 0; i < priv->num_tx_queues; i++)
 543                spin_unlock(&priv->tx_queue[i]->txlock);
 544}
 545
 546static int gfar_alloc_tx_queues(struct gfar_private *priv)
 547{
 548        int i;
 549
 550        for (i = 0; i < priv->num_tx_queues; i++) {
 551                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
 552                                            GFP_KERNEL);
 553                if (!priv->tx_queue[i])
 554                        return -ENOMEM;
 555
 556                priv->tx_queue[i]->tx_skbuff = NULL;
 557                priv->tx_queue[i]->qindex = i;
 558                priv->tx_queue[i]->dev = priv->ndev;
 559                spin_lock_init(&(priv->tx_queue[i]->txlock));
 560        }
 561        return 0;
 562}
 563
 564static int gfar_alloc_rx_queues(struct gfar_private *priv)
 565{
 566        int i;
 567
 568        for (i = 0; i < priv->num_rx_queues; i++) {
 569                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
 570                                            GFP_KERNEL);
 571                if (!priv->rx_queue[i])
 572                        return -ENOMEM;
 573
 574                priv->rx_queue[i]->rx_skbuff = NULL;
 575                priv->rx_queue[i]->qindex = i;
 576                priv->rx_queue[i]->dev = priv->ndev;
 577        }
 578        return 0;
 579}
 580
 581static void gfar_free_tx_queues(struct gfar_private *priv)
 582{
 583        int i;
 584
 585        for (i = 0; i < priv->num_tx_queues; i++)
 586                kfree(priv->tx_queue[i]);
 587}
 588
 589static void gfar_free_rx_queues(struct gfar_private *priv)
 590{
 591        int i;
 592
 593        for (i = 0; i < priv->num_rx_queues; i++)
 594                kfree(priv->rx_queue[i]);
 595}
 596
 597static void unmap_group_regs(struct gfar_private *priv)
 598{
 599        int i;
 600
 601        for (i = 0; i < MAXGROUPS; i++)
 602                if (priv->gfargrp[i].regs)
 603                        iounmap(priv->gfargrp[i].regs);
 604}
 605
 606static void free_gfar_dev(struct gfar_private *priv)
 607{
 608        int i, j;
 609
 610        for (i = 0; i < priv->num_grps; i++)
 611                for (j = 0; j < GFAR_NUM_IRQS; j++) {
 612                        kfree(priv->gfargrp[i].irqinfo[j]);
 613                        priv->gfargrp[i].irqinfo[j] = NULL;
 614                }
 615
 616        free_netdev(priv->ndev);
 617}
 618
 619static void disable_napi(struct gfar_private *priv)
 620{
 621        int i;
 622
 623        for (i = 0; i < priv->num_grps; i++) {
 624                napi_disable(&priv->gfargrp[i].napi_rx);
 625                napi_disable(&priv->gfargrp[i].napi_tx);
 626        }
 627}
 628
 629static void enable_napi(struct gfar_private *priv)
 630{
 631        int i;
 632
 633        for (i = 0; i < priv->num_grps; i++) {
 634                napi_enable(&priv->gfargrp[i].napi_rx);
 635                napi_enable(&priv->gfargrp[i].napi_tx);
 636        }
 637}
 638
 639static int gfar_parse_group(struct device_node *np,
 640                            struct gfar_private *priv, const char *model)
 641{
 642        struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
 643        int i;
 644
 645        for (i = 0; i < GFAR_NUM_IRQS; i++) {
 646                grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
 647                                          GFP_KERNEL);
 648                if (!grp->irqinfo[i])
 649                        return -ENOMEM;
 650        }
 651
 652        grp->regs = of_iomap(np, 0);
 653        if (!grp->regs)
 654                return -ENOMEM;
 655
 656        gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
 657
 658        /* If we aren't the FEC we have multiple interrupts */
 659        if (model && strcasecmp(model, "FEC")) {
 660                gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
 661                gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
 662                if (gfar_irq(grp, TX)->irq == NO_IRQ ||
 663                    gfar_irq(grp, RX)->irq == NO_IRQ ||
 664                    gfar_irq(grp, ER)->irq == NO_IRQ)
 665                        return -EINVAL;
 666        }
 667
 668        grp->priv = priv;
 669        spin_lock_init(&grp->grplock);
 670        if (priv->mode == MQ_MG_MODE) {
 671                u32 *rxq_mask, *txq_mask;
 672                rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
 673                txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
 674
 675                if (priv->poll_mode == GFAR_SQ_POLLING) {
 676                        /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
 677                        grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 678                        grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 679                } else { /* GFAR_MQ_POLLING */
 680                        grp->rx_bit_map = rxq_mask ?
 681                        *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 682                        grp->tx_bit_map = txq_mask ?
 683                        *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 684                }
 685        } else {
 686                grp->rx_bit_map = 0xFF;
 687                grp->tx_bit_map = 0xFF;
 688        }
 689
  690        /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
  691         * from right to left, so we need to reverse the 8 bits to get the q index
 692         */
 693        grp->rx_bit_map = bitrev8(grp->rx_bit_map);
 694        grp->tx_bit_map = bitrev8(grp->tx_bit_map);
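
        /* For example, a device-tree mask of 0x80 (only q0 enabled) becomes
         * 0x01 after bitrev8(), so for_each_set_bit() below reports queue
         * index 0.
         */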
 695
 696        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
 697         * also assign queues to groups
 698         */
 699        for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
 700                if (!grp->rx_queue)
 701                        grp->rx_queue = priv->rx_queue[i];
 702                grp->num_rx_queues++;
 703                grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
 704                priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
 705                priv->rx_queue[i]->grp = grp;
 706        }
 707
 708        for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
 709                if (!grp->tx_queue)
 710                        grp->tx_queue = priv->tx_queue[i];
 711                grp->num_tx_queues++;
 712                grp->tstat |= (TSTAT_CLEAR_THALT >> i);
 713                priv->tqueue |= (TQUEUE_EN0 >> i);
 714                priv->tx_queue[i]->grp = grp;
 715        }
 716
 717        priv->num_grps++;
 718
 719        return 0;
 720}
 721
 722static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 723{
 724        const char *model;
 725        const char *ctype;
 726        const void *mac_addr;
 727        int err = 0, i;
 728        struct net_device *dev = NULL;
 729        struct gfar_private *priv = NULL;
 730        struct device_node *np = ofdev->dev.of_node;
 731        struct device_node *child = NULL;
 732        const u32 *stash;
 733        const u32 *stash_len;
 734        const u32 *stash_idx;
 735        unsigned int num_tx_qs, num_rx_qs;
 736        u32 *tx_queues, *rx_queues;
 737        unsigned short mode, poll_mode;
 738
 739        if (!np || !of_device_is_available(np))
 740                return -ENODEV;
 741
 742        if (of_device_is_compatible(np, "fsl,etsec2")) {
 743                mode = MQ_MG_MODE;
 744                poll_mode = GFAR_SQ_POLLING;
 745        } else {
 746                mode = SQ_SG_MODE;
 747                poll_mode = GFAR_SQ_POLLING;
 748        }
 749
 750        /* parse the num of HW tx and rx queues */
 751        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
 752        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
 753
 754        if (mode == SQ_SG_MODE) {
 755                num_tx_qs = 1;
 756                num_rx_qs = 1;
 757        } else { /* MQ_MG_MODE */
 758                /* get the actual number of supported groups */
 759                unsigned int num_grps = of_get_available_child_count(np);
 760
 761                if (num_grps == 0 || num_grps > MAXGROUPS) {
 762                        dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
 763                                num_grps);
 764                        pr_err("Cannot do alloc_etherdev, aborting\n");
 765                        return -EINVAL;
 766                }
 767
 768                if (poll_mode == GFAR_SQ_POLLING) {
 769                        num_tx_qs = num_grps; /* one txq per int group */
 770                        num_rx_qs = num_grps; /* one rxq per int group */
 771                } else { /* GFAR_MQ_POLLING */
 772                        num_tx_qs = tx_queues ? *tx_queues : 1;
 773                        num_rx_qs = rx_queues ? *rx_queues : 1;
 774                }
 775        }
 776
 777        if (num_tx_qs > MAX_TX_QS) {
 778                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
 779                       num_tx_qs, MAX_TX_QS);
 780                pr_err("Cannot do alloc_etherdev, aborting\n");
 781                return -EINVAL;
 782        }
 783
 784        if (num_rx_qs > MAX_RX_QS) {
 785                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
 786                       num_rx_qs, MAX_RX_QS);
 787                pr_err("Cannot do alloc_etherdev, aborting\n");
 788                return -EINVAL;
 789        }
 790
 791        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
 792        dev = *pdev;
 793        if (NULL == dev)
 794                return -ENOMEM;
 795
 796        priv = netdev_priv(dev);
 797        priv->ndev = dev;
 798
 799        priv->mode = mode;
 800        priv->poll_mode = poll_mode;
 801
 802        priv->num_tx_queues = num_tx_qs;
 803        netif_set_real_num_rx_queues(dev, num_rx_qs);
 804        priv->num_rx_queues = num_rx_qs;
 805
 806        err = gfar_alloc_tx_queues(priv);
 807        if (err)
 808                goto tx_alloc_failed;
 809
 810        err = gfar_alloc_rx_queues(priv);
 811        if (err)
 812                goto rx_alloc_failed;
 813
 814        /* Init Rx queue filer rule set linked list */
 815        INIT_LIST_HEAD(&priv->rx_list.list);
 816        priv->rx_list.count = 0;
 817        mutex_init(&priv->rx_queue_access);
 818
 819        model = of_get_property(np, "model", NULL);
 820
 821        for (i = 0; i < MAXGROUPS; i++)
 822                priv->gfargrp[i].regs = NULL;
 823
 824        /* Parse and initialize group specific information */
 825        if (priv->mode == MQ_MG_MODE) {
 826                for_each_child_of_node(np, child) {
 827                        err = gfar_parse_group(child, priv, model);
 828                        if (err)
 829                                goto err_grp_init;
 830                }
 831        } else { /* SQ_SG_MODE */
 832                err = gfar_parse_group(np, priv, model);
 833                if (err)
 834                        goto err_grp_init;
 835        }
 836
 837        stash = of_get_property(np, "bd-stash", NULL);
 838
 839        if (stash) {
 840                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 841                priv->bd_stash_en = 1;
 842        }
 843
 844        stash_len = of_get_property(np, "rx-stash-len", NULL);
 845
 846        if (stash_len)
 847                priv->rx_stash_size = *stash_len;
 848
 849        stash_idx = of_get_property(np, "rx-stash-idx", NULL);
 850
 851        if (stash_idx)
 852                priv->rx_stash_index = *stash_idx;
 853
 854        if (stash_len || stash_idx)
 855                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
 856
 857        mac_addr = of_get_mac_address(np);
 858
 859        if (mac_addr)
 860                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 861
 862        if (model && !strcasecmp(model, "TSEC"))
 863                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 864                                     FSL_GIANFAR_DEV_HAS_COALESCE |
 865                                     FSL_GIANFAR_DEV_HAS_RMON |
 866                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 867
 868        if (model && !strcasecmp(model, "eTSEC"))
 869                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
 870                                     FSL_GIANFAR_DEV_HAS_COALESCE |
 871                                     FSL_GIANFAR_DEV_HAS_RMON |
 872                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
 873                                     FSL_GIANFAR_DEV_HAS_CSUM |
 874                                     FSL_GIANFAR_DEV_HAS_VLAN |
 875                                     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 876                                     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 877                                     FSL_GIANFAR_DEV_HAS_TIMER;
 878
 879        ctype = of_get_property(np, "phy-connection-type", NULL);
 880
 881        /* We only care about rgmii-id.  The rest are autodetected */
 882        if (ctype && !strcmp(ctype, "rgmii-id"))
 883                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 884        else
 885                priv->interface = PHY_INTERFACE_MODE_MII;
 886
 887        if (of_get_property(np, "fsl,magic-packet", NULL))
 888                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 889
 890        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 891
 892        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
 893        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 894
 895        return 0;
 896
 897err_grp_init:
 898        unmap_group_regs(priv);
 899rx_alloc_failed:
 900        gfar_free_rx_queues(priv);
 901tx_alloc_failed:
 902        gfar_free_tx_queues(priv);
 903        free_gfar_dev(priv);
 904        return err;
 905}
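
/* Device-tree properties consumed by gfar_of_init()/gfar_parse_group() above:
 * "compatible" ("fsl,etsec2" selects multi-group mode), "model"
 * ("TSEC"/"eTSEC"/"FEC"), "fsl,num_tx_queues", "fsl,num_rx_queues",
 * per-group "fsl,rx-bit-map"/"fsl,tx-bit-map", "bd-stash", "rx-stash-len",
 * "rx-stash-idx", the MAC address, "phy-connection-type", "fsl,magic-packet",
 * "phy-handle" and "tbi-handle".
 */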
 906
 907static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 908{
 909        struct hwtstamp_config config;
 910        struct gfar_private *priv = netdev_priv(netdev);
 911
 912        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 913                return -EFAULT;
 914
 915        /* reserved for future extensions */
 916        if (config.flags)
 917                return -EINVAL;
 918
 919        switch (config.tx_type) {
 920        case HWTSTAMP_TX_OFF:
 921                priv->hwts_tx_en = 0;
 922                break;
 923        case HWTSTAMP_TX_ON:
 924                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 925                        return -ERANGE;
 926                priv->hwts_tx_en = 1;
 927                break;
 928        default:
 929                return -ERANGE;
 930        }
 931
 932        switch (config.rx_filter) {
 933        case HWTSTAMP_FILTER_NONE:
 934                if (priv->hwts_rx_en) {
 935                        priv->hwts_rx_en = 0;
 936                        reset_gfar(netdev);
 937                }
 938                break;
 939        default:
 940                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 941                        return -ERANGE;
 942                if (!priv->hwts_rx_en) {
 943                        priv->hwts_rx_en = 1;
 944                        reset_gfar(netdev);
 945                }
 946                config.rx_filter = HWTSTAMP_FILTER_ALL;
 947                break;
 948        }
 949
 950        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 951                -EFAULT : 0;
 952}
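
/* A minimal user-space sketch (not part of the driver) showing how this
 * handler is reached: the SIOCSHWTSTAMP ioctl carries a struct
 * hwtstamp_config through ifr_data, exactly as the copy_from_user() above
 * expects.  The interface name "eth0" and the socket are assumptions.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */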
 953
 954static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
 955{
 956        struct hwtstamp_config config;
 957        struct gfar_private *priv = netdev_priv(netdev);
 958
 959        config.flags = 0;
 960        config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 961        config.rx_filter = (priv->hwts_rx_en ?
 962                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
 963
 964        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 965                -EFAULT : 0;
 966}
 967
 968static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 969{
 970        struct gfar_private *priv = netdev_priv(dev);
 971
 972        if (!netif_running(dev))
 973                return -EINVAL;
 974
 975        if (cmd == SIOCSHWTSTAMP)
 976                return gfar_hwtstamp_set(dev, rq);
 977        if (cmd == SIOCGHWTSTAMP)
 978                return gfar_hwtstamp_get(dev, rq);
 979
 980        if (!priv->phydev)
 981                return -ENODEV;
 982
 983        return phy_mii_ioctl(priv->phydev, rq, cmd);
 984}
 985
 986static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
 987                                   u32 class)
 988{
 989        u32 rqfpr = FPR_FILER_MASK;
 990        u32 rqfcr = 0x0;
 991
 992        rqfar--;
 993        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 994        priv->ftp_rqfpr[rqfar] = rqfpr;
 995        priv->ftp_rqfcr[rqfar] = rqfcr;
 996        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 997
 998        rqfar--;
 999        rqfcr = RQFCR_CMP_NOMATCH;
1000        priv->ftp_rqfpr[rqfar] = rqfpr;
1001        priv->ftp_rqfcr[rqfar] = rqfcr;
1002        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1003
1004        rqfar--;
1005        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1006        rqfpr = class;
1007        priv->ftp_rqfcr[rqfar] = rqfcr;
1008        priv->ftp_rqfpr[rqfar] = rqfpr;
1009        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1010
1011        rqfar--;
1012        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1013        rqfpr = class;
1014        priv->ftp_rqfcr[rqfar] = rqfcr;
1015        priv->ftp_rqfpr[rqfar] = rqfpr;
1016        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1017
1018        return rqfar;
1019}
1020
1021static void gfar_init_filer_table(struct gfar_private *priv)
1022{
1023        int i = 0x0;
1024        u32 rqfar = MAX_FILER_IDX;
1025        u32 rqfcr = 0x0;
1026        u32 rqfpr = FPR_FILER_MASK;
1027
1028        /* Default rule */
1029        rqfcr = RQFCR_CMP_MATCH;
1030        priv->ftp_rqfcr[rqfar] = rqfcr;
1031        priv->ftp_rqfpr[rqfar] = rqfpr;
1032        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1033
1034        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1035        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1036        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1037        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1038        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1039        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1040
 1041        /* cur_filer_idx indicates the first non-masked rule */
1042        priv->cur_filer_idx = rqfar;
1043
1044        /* Rest are masked rules */
1045        rqfcr = RQFCR_CMP_NOMATCH;
1046        for (i = 0; i < rqfar; i++) {
1047                priv->ftp_rqfcr[i] = rqfcr;
1048                priv->ftp_rqfpr[i] = rqfpr;
1049                gfar_write_filer(priv, i, rqfcr, rqfpr);
1050        }
1051}
1052
1053static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1054{
1055        unsigned int pvr = mfspr(SPRN_PVR);
1056        unsigned int svr = mfspr(SPRN_SVR);
1057        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1058        unsigned int rev = svr & 0xffff;
1059
1060        /* MPC8313 Rev 2.0 and higher; All MPC837x */
1061        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1062            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1063                priv->errata |= GFAR_ERRATA_74;
1064
1065        /* MPC8313 and MPC837x all rev */
1066        if ((pvr == 0x80850010 && mod == 0x80b0) ||
1067            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1068                priv->errata |= GFAR_ERRATA_76;
1069
1070        /* MPC8313 Rev < 2.0 */
1071        if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1072                priv->errata |= GFAR_ERRATA_12;
1073}
1074
1075static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1076{
1077        unsigned int svr = mfspr(SPRN_SVR);
1078
1079        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1080                priv->errata |= GFAR_ERRATA_12;
1081        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1082            ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1083                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1084}
1085
1086static void gfar_detect_errata(struct gfar_private *priv)
1087{
1088        struct device *dev = &priv->ofdev->dev;
1089
1090        /* no plans to fix */
1091        priv->errata |= GFAR_ERRATA_A002;
1092
1093        if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1094                __gfar_detect_errata_85xx(priv);
1095        else /* non-mpc85xx parts, i.e. e300 core based */
1096                __gfar_detect_errata_83xx(priv);
1097
1098        if (priv->errata)
1099                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1100                         priv->errata);
1101}
1102
1103void gfar_mac_reset(struct gfar_private *priv)
1104{
1105        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1106        u32 tempval;
1107
1108        /* Reset MAC layer */
1109        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1110
1111        /* We need to delay at least 3 TX clocks */
1112        udelay(3);
1113
1114        /* the soft reset bit is not self-resetting, so we need to
1115         * clear it before resuming normal operation
1116         */
1117        gfar_write(&regs->maccfg1, 0);
1118
1119        udelay(3);
1120
1121        /* Compute rx_buff_size based on config flags */
1122        gfar_rx_buff_size_config(priv);
1123
1124        /* Initialize the max receive frame/buffer lengths */
1125        gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1126        gfar_write(&regs->mrblr, priv->rx_buffer_size);
1127
1128        /* Initialize the Minimum Frame Length Register */
1129        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1130
1131        /* Initialize MACCFG2. */
1132        tempval = MACCFG2_INIT_SETTINGS;
1133
1134        /* If the mtu is larger than the max size for standard
1135         * ethernet frames (ie, a jumbo frame), then set maccfg2
1136         * to allow huge frames, and to check the length
1137         */
1138        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1139            gfar_has_errata(priv, GFAR_ERRATA_74))
1140                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1141
1142        gfar_write(&regs->maccfg2, tempval);
1143
1144        /* Clear mac addr hash registers */
1145        gfar_write(&regs->igaddr0, 0);
1146        gfar_write(&regs->igaddr1, 0);
1147        gfar_write(&regs->igaddr2, 0);
1148        gfar_write(&regs->igaddr3, 0);
1149        gfar_write(&regs->igaddr4, 0);
1150        gfar_write(&regs->igaddr5, 0);
1151        gfar_write(&regs->igaddr6, 0);
1152        gfar_write(&regs->igaddr7, 0);
1153
1154        gfar_write(&regs->gaddr0, 0);
1155        gfar_write(&regs->gaddr1, 0);
1156        gfar_write(&regs->gaddr2, 0);
1157        gfar_write(&regs->gaddr3, 0);
1158        gfar_write(&regs->gaddr4, 0);
1159        gfar_write(&regs->gaddr5, 0);
1160        gfar_write(&regs->gaddr6, 0);
1161        gfar_write(&regs->gaddr7, 0);
1162
1163        if (priv->extended_hash)
1164                gfar_clear_exact_match(priv->ndev);
1165
1166        gfar_mac_rx_config(priv);
1167
1168        gfar_mac_tx_config(priv);
1169
1170        gfar_set_mac_address(priv->ndev);
1171
1172        gfar_set_multi(priv->ndev);
1173
1174        /* clear ievent and imask before configuring coalescing */
1175        gfar_ints_disable(priv);
1176
1177        /* Configure the coalescing support */
1178        gfar_configure_coalescing_all(priv);
1179}
1180
1181static void gfar_hw_init(struct gfar_private *priv)
1182{
1183        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1184        u32 attrs;
1185
1186        /* Stop the DMA engine now, in case it was running before
1187         * (The firmware could have used it, and left it running).
1188         */
1189        gfar_halt(priv);
1190
1191        gfar_mac_reset(priv);
1192
1193        /* Zero out the rmon mib registers if it has them */
1194        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1195                memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1196
1197                /* Mask off the CAM interrupts */
1198                gfar_write(&regs->rmon.cam1, 0xffffffff);
1199                gfar_write(&regs->rmon.cam2, 0xffffffff);
1200        }
1201
1202        /* Initialize ECNTRL */
1203        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1204
1205        /* Set the extraction length and index */
1206        attrs = ATTRELI_EL(priv->rx_stash_size) |
1207                ATTRELI_EI(priv->rx_stash_index);
1208
1209        gfar_write(&regs->attreli, attrs);
1210
1211        /* Start with defaults, and add stashing
1212         * depending on driver parameters
1213         */
1214        attrs = ATTR_INIT_SETTINGS;
1215
1216        if (priv->bd_stash_en)
1217                attrs |= ATTR_BDSTASH;
1218
1219        if (priv->rx_stash_size != 0)
1220                attrs |= ATTR_BUFSTASH;
1221
1222        gfar_write(&regs->attr, attrs);
1223
1224        /* FIFO configs */
1225        gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1226        gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1227        gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1228
1229        /* Program the interrupt steering regs, only for MG devices */
1230        if (priv->num_grps > 1)
1231                gfar_write_isrg(priv);
1232}
1233
 1234static void gfar_init_addr_hash_table(struct gfar_private *priv)
1235{
1236        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1237
1238        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1239                priv->extended_hash = 1;
1240                priv->hash_width = 9;
1241
1242                priv->hash_regs[0] = &regs->igaddr0;
1243                priv->hash_regs[1] = &regs->igaddr1;
1244                priv->hash_regs[2] = &regs->igaddr2;
1245                priv->hash_regs[3] = &regs->igaddr3;
1246                priv->hash_regs[4] = &regs->igaddr4;
1247                priv->hash_regs[5] = &regs->igaddr5;
1248                priv->hash_regs[6] = &regs->igaddr6;
1249                priv->hash_regs[7] = &regs->igaddr7;
1250                priv->hash_regs[8] = &regs->gaddr0;
1251                priv->hash_regs[9] = &regs->gaddr1;
1252                priv->hash_regs[10] = &regs->gaddr2;
1253                priv->hash_regs[11] = &regs->gaddr3;
1254                priv->hash_regs[12] = &regs->gaddr4;
1255                priv->hash_regs[13] = &regs->gaddr5;
1256                priv->hash_regs[14] = &regs->gaddr6;
1257                priv->hash_regs[15] = &regs->gaddr7;
1258
1259        } else {
1260                priv->extended_hash = 0;
1261                priv->hash_width = 8;
1262
1263                priv->hash_regs[0] = &regs->gaddr0;
1264                priv->hash_regs[1] = &regs->gaddr1;
1265                priv->hash_regs[2] = &regs->gaddr2;
1266                priv->hash_regs[3] = &regs->gaddr3;
1267                priv->hash_regs[4] = &regs->gaddr4;
1268                priv->hash_regs[5] = &regs->gaddr5;
1269                priv->hash_regs[6] = &regs->gaddr6;
1270                priv->hash_regs[7] = &regs->gaddr7;
1271        }
1272}
1273
1274/* Set up the ethernet device structure, private data,
1275 * and anything else we need before we start
1276 */
1277static int gfar_probe(struct platform_device *ofdev)
1278{
1279        struct net_device *dev = NULL;
1280        struct gfar_private *priv = NULL;
1281        int err = 0, i;
1282
1283        err = gfar_of_init(ofdev, &dev);
1284
1285        if (err)
1286                return err;
1287
1288        priv = netdev_priv(dev);
1289        priv->ndev = dev;
1290        priv->ofdev = ofdev;
1291        priv->dev = &ofdev->dev;
1292        SET_NETDEV_DEV(dev, &ofdev->dev);
1293
1294        spin_lock_init(&priv->bflock);
1295        INIT_WORK(&priv->reset_task, gfar_reset_task);
1296
1297        platform_set_drvdata(ofdev, priv);
1298
1299        gfar_detect_errata(priv);
1300
1301        /* Set the dev->base_addr to the gfar reg region */
1302        dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1303
1304        /* Fill in the dev structure */
1305        dev->watchdog_timeo = TX_TIMEOUT;
1306        dev->mtu = 1500;
1307        dev->netdev_ops = &gfar_netdev_ops;
1308        dev->ethtool_ops = &gfar_ethtool_ops;
1309
 1310        /* Register for NAPI. We are registering NAPI for each group */
1311        for (i = 0; i < priv->num_grps; i++) {
1312                if (priv->poll_mode == GFAR_SQ_POLLING) {
1313                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1314                                       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1315                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1316                                       gfar_poll_tx_sq, 2);
1317                } else {
1318                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1319                                       gfar_poll_rx, GFAR_DEV_WEIGHT);
1320                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1321                                       gfar_poll_tx, 2);
1322                }
1323        }
1324
1325        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1326                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1327                                   NETIF_F_RXCSUM;
1328                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1329                                 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1330        }
1331
1332        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1333                dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1334                                    NETIF_F_HW_VLAN_CTAG_RX;
1335                dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1336        }
1337
1338        gfar_init_addr_hash_table(priv);
1339
1340        /* Insert receive time stamps into padding alignment bytes */
1341        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1342                priv->padding = 8;
1343
1344        if (dev->features & NETIF_F_IP_CSUM ||
1345            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1346                dev->needed_headroom = GMAC_FCB_LEN;
1347
1348        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1349
1350        /* Initializing some of the rx/tx queue level parameters */
1351        for (i = 0; i < priv->num_tx_queues; i++) {
1352                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1353                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1354                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1355                priv->tx_queue[i]->txic = DEFAULT_TXIC;
1356        }
1357
1358        for (i = 0; i < priv->num_rx_queues; i++) {
1359                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1360                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1361                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1362        }
1363
1364        /* always enable rx filer */
1365        priv->rx_filer_enable = 1;
1366        /* Enable most messages by default */
1367        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 1368        /* use priority h/w tx queue scheduling for single queue devices */
1369        if (priv->num_tx_queues == 1)
1370                priv->prio_sched_en = 1;
1371
1372        set_bit(GFAR_DOWN, &priv->state);
1373
1374        gfar_hw_init(priv);
1375
1376        err = register_netdev(dev);
1377
1378        if (err) {
1379                pr_err("%s: Cannot register net device, aborting\n", dev->name);
1380                goto register_fail;
1381        }
1382
1383        /* Carrier starts down, phylib will bring it up */
1384        netif_carrier_off(dev);
1385
1386        device_init_wakeup(&dev->dev,
1387                           priv->device_flags &
1388                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1389
1390        /* fill out IRQ number and name fields */
1391        for (i = 0; i < priv->num_grps; i++) {
1392                struct gfar_priv_grp *grp = &priv->gfargrp[i];
1393                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1394                        sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1395                                dev->name, "_g", '0' + i, "_tx");
1396                        sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1397                                dev->name, "_g", '0' + i, "_rx");
1398                        sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1399                                dev->name, "_g", '0' + i, "_er");
1400                } else
1401                        strcpy(gfar_irq(grp, TX)->name, dev->name);
1402        }
1403
1404        /* Initialize the filer table */
1405        gfar_init_filer_table(priv);
1406
1407        /* Print out the device info */
1408        netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1409
1410        /* Even more device info helps when determining which kernel
1411         * provided which set of benchmarks.
1412         */
1413        netdev_info(dev, "Running with NAPI enabled\n");
1414        for (i = 0; i < priv->num_rx_queues; i++)
1415                netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1416                            i, priv->rx_queue[i]->rx_ring_size);
1417        for (i = 0; i < priv->num_tx_queues; i++)
1418                netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1419                            i, priv->tx_queue[i]->tx_ring_size);
1420
1421        return 0;
1422
1423register_fail:
1424        unmap_group_regs(priv);
1425        gfar_free_rx_queues(priv);
1426        gfar_free_tx_queues(priv);
1427        if (priv->phy_node)
1428                of_node_put(priv->phy_node);
1429        if (priv->tbi_node)
1430                of_node_put(priv->tbi_node);
1431        free_gfar_dev(priv);
1432        return err;
1433}
1434
1435static int gfar_remove(struct platform_device *ofdev)
1436{
1437        struct gfar_private *priv = platform_get_drvdata(ofdev);
1438
1439        if (priv->phy_node)
1440                of_node_put(priv->phy_node);
1441        if (priv->tbi_node)
1442                of_node_put(priv->tbi_node);
1443
1444        unregister_netdev(priv->ndev);
1445        unmap_group_regs(priv);
1446        gfar_free_rx_queues(priv);
1447        gfar_free_tx_queues(priv);
1448        free_gfar_dev(priv);
1449
1450        return 0;
1451}
1452
1453#ifdef CONFIG_PM
1454
1455static int gfar_suspend(struct device *dev)
1456{
1457        struct gfar_private *priv = dev_get_drvdata(dev);
1458        struct net_device *ndev = priv->ndev;
1459        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1460        unsigned long flags;
1461        u32 tempval;
1462
1463        int magic_packet = priv->wol_en &&
1464                           (priv->device_flags &
1465                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1466
1467        netif_device_detach(ndev);
1468
1469        if (netif_running(ndev)) {
1470
1471                local_irq_save(flags);
1472                lock_tx_qs(priv);
1473
1474                gfar_halt_nodisable(priv);
1475
1476                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1477                tempval = gfar_read(&regs->maccfg1);
1478
1479                tempval &= ~MACCFG1_TX_EN;
1480
1481                if (!magic_packet)
1482                        tempval &= ~MACCFG1_RX_EN;
1483
1484                gfar_write(&regs->maccfg1, tempval);
1485
1486                unlock_tx_qs(priv);
1487                local_irq_restore(flags);
1488
1489                disable_napi(priv);
1490
1491                if (magic_packet) {
1492                        /* Enable interrupt on Magic Packet */
1493                        gfar_write(&regs->imask, IMASK_MAG);
1494
1495                        /* Enable Magic Packet mode */
1496                        tempval = gfar_read(&regs->maccfg2);
1497                        tempval |= MACCFG2_MPEN;
1498                        gfar_write(&regs->maccfg2, tempval);
1499                } else {
1500                        phy_stop(priv->phydev);
1501                }
1502        }
1503
1504        return 0;
1505}
1506
1507static int gfar_resume(struct device *dev)
1508{
1509        struct gfar_private *priv = dev_get_drvdata(dev);
1510        struct net_device *ndev = priv->ndev;
1511        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1512        unsigned long flags;
1513        u32 tempval;
1514        int magic_packet = priv->wol_en &&
1515                           (priv->device_flags &
1516                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1517
1518        if (!netif_running(ndev)) {
1519                netif_device_attach(ndev);
1520                return 0;
1521        }
1522
1523        if (!magic_packet && priv->phydev)
1524                phy_start(priv->phydev);
1525
1526        /* Disable Magic Packet mode, in case something
1527         * else woke us up.
1528         */
1529        local_irq_save(flags);
1530        lock_tx_qs(priv);
1531
1532        tempval = gfar_read(&regs->maccfg2);
1533        tempval &= ~MACCFG2_MPEN;
1534        gfar_write(&regs->maccfg2, tempval);
1535
1536        gfar_start(priv);
1537
1538        unlock_tx_qs(priv);
1539        local_irq_restore(flags);
1540
1541        netif_device_attach(ndev);
1542
1543        enable_napi(priv);
1544
1545        return 0;
1546}
1547
1548static int gfar_restore(struct device *dev)
1549{
1550        struct gfar_private *priv = dev_get_drvdata(dev);
1551        struct net_device *ndev = priv->ndev;
1552
1553        if (!netif_running(ndev)) {
1554                netif_device_attach(ndev);
1555
1556                return 0;
1557        }
1558
1559        if (gfar_init_bds(ndev)) {
1560                free_skb_resources(priv);
1561                return -ENOMEM;
1562        }
1563
1564        gfar_mac_reset(priv);
1565
1566        gfar_init_tx_rx_base(priv);
1567
1568        gfar_start(priv);
1569
1570        priv->oldlink = 0;
1571        priv->oldspeed = 0;
1572        priv->oldduplex = -1;
1573
1574        if (priv->phydev)
1575                phy_start(priv->phydev);
1576
1577        netif_device_attach(ndev);
1578        enable_napi(priv);
1579
1580        return 0;
1581}
1582
1583static struct dev_pm_ops gfar_pm_ops = {
1584        .suspend = gfar_suspend,
1585        .resume = gfar_resume,
1586        .freeze = gfar_suspend,
1587        .thaw = gfar_resume,
1588        .restore = gfar_restore,
1589};
1590
1591#define GFAR_PM_OPS (&gfar_pm_ops)
1592
1593#else
1594
1595#define GFAR_PM_OPS NULL
1596
1597#endif
1598
1599/* Reads the controller's registers to determine what interface
1600 * connects it to the PHY.
1601 */
1602static phy_interface_t gfar_get_interface(struct net_device *dev)
1603{
1604        struct gfar_private *priv = netdev_priv(dev);
1605        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1606        u32 ecntrl;
1607
1608        ecntrl = gfar_read(&regs->ecntrl);
1609
1610        if (ecntrl & ECNTRL_SGMII_MODE)
1611                return PHY_INTERFACE_MODE_SGMII;
1612
1613        if (ecntrl & ECNTRL_TBI_MODE) {
1614                if (ecntrl & ECNTRL_REDUCED_MODE)
1615                        return PHY_INTERFACE_MODE_RTBI;
1616                else
1617                        return PHY_INTERFACE_MODE_TBI;
1618        }
1619
1620        if (ecntrl & ECNTRL_REDUCED_MODE) {
1621                if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1622                        return PHY_INTERFACE_MODE_RMII;
1623                }
1624                else {
1625                        phy_interface_t interface = priv->interface;
1626
1627                        /* This isn't autodetected right now, so it must
1628                         * be set by the device tree or platform code.
1629                         */
1630                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1631                                return PHY_INTERFACE_MODE_RGMII_ID;
1632
1633                        return PHY_INTERFACE_MODE_RGMII;
1634                }
1635        }
1636
1637        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1638                return PHY_INTERFACE_MODE_GMII;
1639
1640        return PHY_INTERFACE_MODE_MII;
1641}
1642
1643
1644/* Initializes driver's PHY state, and attaches to the PHY.
1645 * Returns 0 on success.
1646 */
1647static int init_phy(struct net_device *dev)
1648{
1649        struct gfar_private *priv = netdev_priv(dev);
1650        uint gigabit_support =
1651                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1652                GFAR_SUPPORTED_GBIT : 0;
1653        phy_interface_t interface;
1654
1655        priv->oldlink = 0;
1656        priv->oldspeed = 0;
1657        priv->oldduplex = -1;
1658
1659        interface = gfar_get_interface(dev);
1660
1661        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1662                                      interface);
1663        if (!priv->phydev)
1664                priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1665                                                         interface);
1666        if (!priv->phydev) {
1667                dev_err(&dev->dev, "could not attach to PHY\n");
1668                return -ENODEV;
1669        }
1670
1671        if (interface == PHY_INTERFACE_MODE_SGMII)
1672                gfar_configure_serdes(dev);
1673
1674        /* Remove any features not supported by the controller */
1675        priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1676        priv->phydev->advertising = priv->phydev->supported;
1677
1678        return 0;
1679}
1680
1681/* Initialize TBI PHY interface for communicating with the
1682 * SERDES lynx PHY on the chip.  We communicate with this PHY
1683 * through the MDIO bus on each controller, treating it as a
1684 * "normal" PHY at the address found in the TBIPA register.  We assume
1685 * that the TBIPA register is valid.  Either the MDIO bus code will set
1686 * it to a value that doesn't conflict with other PHYs on the bus, or the
1687 * value doesn't matter, as there are no other PHYs on the bus.
1688 */
1689static void gfar_configure_serdes(struct net_device *dev)
1690{
1691        struct gfar_private *priv = netdev_priv(dev);
1692        struct phy_device *tbiphy;
1693
1694        if (!priv->tbi_node) {
1695                dev_warn(&dev->dev, "error: SGMII mode requires that the "
1696                                    "device tree specify a tbi-handle\n");
1697                return;
1698        }
1699
1700        tbiphy = of_phy_find_device(priv->tbi_node);
1701        if (!tbiphy) {
1702                dev_err(&dev->dev, "error: Could not get TBI device\n");
1703                return;
1704        }
1705
1706        /* If the link is already up, we must already be ok, and don't need to
1707         * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1708         * everything for us?  Resetting it takes the link down and requires
1709         * several seconds for it to come back.
1710         */
1711        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1712                return;
1713
1714        /* Single clk mode, mii mode off (for serdes communication) */
1715        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1716
1717        phy_write(tbiphy, MII_ADVERTISE,
1718                  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1719                  ADVERTISE_1000XPSE_ASYM);
1720
1721        phy_write(tbiphy, MII_BMCR,
1722                  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1723                  BMCR_SPEED1000);
1724}
1725
1726static int __gfar_is_rx_idle(struct gfar_private *priv)
1727{
1728        u32 res;
1729
1730        /* Normally the TSEC should not hang on GRS commands, so we should
1731         * actually wait for IEVENT_GRSC flag.
1732         */
1733        if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1734                return 0;
1735
1736        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1737         * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1738         * and the Rx can be safely reset.
1739         */
1740        res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1741        res &= 0x7f807f80;
1742        if ((res & 0xffff) == (res >> 16))
1743                return 1;
1744
1745        return 0;
1746}
1747
1748/* Halt the receive and transmit queues */
1749static void gfar_halt_nodisable(struct gfar_private *priv)
1750{
1751        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1752        u32 tempval;
1753
1754        gfar_ints_disable(priv);
1755
1756        /* Stop the DMA, and wait for it to stop */
1757        tempval = gfar_read(&regs->dmactrl);
1758        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1759            (DMACTRL_GRS | DMACTRL_GTS)) {
1760                int ret;
1761
1762                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1763                gfar_write(&regs->dmactrl, tempval);
1764
1765                do {
1766                        ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1767                                 (IEVENT_GRSC | IEVENT_GTSC)) ==
1768                                 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1769                        if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1770                                ret = __gfar_is_rx_idle(priv);
1771                } while (!ret);
1772        }
1773}
1774
1775/* Halt the receive and transmit queues */
1776void gfar_halt(struct gfar_private *priv)
1777{
1778        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1779        u32 tempval;
1780
1781        /* Disable the Rx/Tx hw queues */
1782        gfar_write(&regs->rqueue, 0);
1783        gfar_write(&regs->tqueue, 0);
1784
1785        mdelay(10);
1786
1787        gfar_halt_nodisable(priv);
1788
1789        /* Disable Rx/Tx DMA */
1790        tempval = gfar_read(&regs->maccfg1);
1791        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1792        gfar_write(&regs->maccfg1, tempval);
1793}
1794
1795void stop_gfar(struct net_device *dev)
1796{
1797        struct gfar_private *priv = netdev_priv(dev);
1798
1799        netif_tx_stop_all_queues(dev);
1800
1801        smp_mb__before_clear_bit();
1802        set_bit(GFAR_DOWN, &priv->state);
1803        smp_mb__after_clear_bit();
1804
1805        disable_napi(priv);
1806
1807        /* disable ints and gracefully shut down Rx/Tx DMA */
1808        gfar_halt(priv);
1809
1810        phy_stop(priv->phydev);
1811
1812        free_skb_resources(priv);
1813}
1814
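    /* Unmap DMA buffers and free any skbs still pending on a Tx ring,
     * then release the tx_skbuff tracking array.
     */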
1815static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1816{
1817        struct txbd8 *txbdp;
1818        struct gfar_private *priv = netdev_priv(tx_queue->dev);
1819        int i, j;
1820
1821        txbdp = tx_queue->tx_bd_base;
1822
1823        for (i = 0; i < tx_queue->tx_ring_size; i++) {
1824                if (!tx_queue->tx_skbuff[i])
1825                        continue;
1826
1827                dma_unmap_single(priv->dev, txbdp->bufPtr,
1828                                 txbdp->length, DMA_TO_DEVICE);
1829                txbdp->lstatus = 0;
1830                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1831                     j++) {
1832                        txbdp++;
1833                        dma_unmap_page(priv->dev, txbdp->bufPtr,
1834                                       txbdp->length, DMA_TO_DEVICE);
1835                }
1836                txbdp++;
1837                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1838                tx_queue->tx_skbuff[i] = NULL;
1839        }
1840        kfree(tx_queue->tx_skbuff);
1841        tx_queue->tx_skbuff = NULL;
1842}
1843
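    /* Unmap and free any Rx skbs left on a ring, clear the RxBDs, and
     * release the rx_skbuff tracking array.
     */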
1844static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1845{
1846        struct rxbd8 *rxbdp;
1847        struct gfar_private *priv = netdev_priv(rx_queue->dev);
1848        int i;
1849
1850        rxbdp = rx_queue->rx_bd_base;
1851
1852        for (i = 0; i < rx_queue->rx_ring_size; i++) {
1853                if (rx_queue->rx_skbuff[i]) {
1854                        dma_unmap_single(priv->dev, rxbdp->bufPtr,
1855                                         priv->rx_buffer_size,
1856                                         DMA_FROM_DEVICE);
1857                        dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1858                        rx_queue->rx_skbuff[i] = NULL;
1859                }
1860                rxbdp->lstatus = 0;
1861                rxbdp->bufPtr = 0;
1862                rxbdp++;
1863        }
1864        kfree(rx_queue->rx_skbuff);
1865        rx_queue->rx_skbuff = NULL;
1866}
1867
1868/* If there are any tx skbs or rx skbs still around, free them.
1869 * Then free tx_skbuff and rx_skbuff
1870 */
1871static void free_skb_resources(struct gfar_private *priv)
1872{
1873        struct gfar_priv_tx_q *tx_queue = NULL;
1874        struct gfar_priv_rx_q *rx_queue = NULL;
1875        int i;
1876
1877        /* Go through all the buffer descriptors and free their data buffers */
1878        for (i = 0; i < priv->num_tx_queues; i++) {
1879                struct netdev_queue *txq;
1880
1881                tx_queue = priv->tx_queue[i];
1882                txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1883                if (tx_queue->tx_skbuff)
1884                        free_skb_tx_queue(tx_queue);
1885                netdev_tx_reset_queue(txq);
1886        }
1887
1888        for (i = 0; i < priv->num_rx_queues; i++) {
1889                rx_queue = priv->rx_queue[i];
1890                if (rx_queue->rx_skbuff)
1891                        free_skb_rx_queue(rx_queue);
1892        }
1893
1894        dma_free_coherent(priv->dev,
1895                          sizeof(struct txbd8) * priv->total_tx_ring_size +
1896                          sizeof(struct rxbd8) * priv->total_rx_ring_size,
1897                          priv->tx_queue[0]->tx_bd_base,
1898                          priv->tx_queue[0]->tx_bd_dma_base);
1899}
1900
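    /* Re-enable the Rx/Tx hardware queues and DMA, clear the halt bits so
     * the controller starts polling the rings again, and unmask interrupts.
     */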
1901void gfar_start(struct gfar_private *priv)
1902{
1903        struct gfar __iomem *regs = priv->gfargrp[0].regs;
1904        u32 tempval;
1905        int i = 0;
1906
1907        /* Enable Rx/Tx hw queues */
1908        gfar_write(&regs->rqueue, priv->rqueue);
1909        gfar_write(&regs->tqueue, priv->tqueue);
1910
1911        /* Initialize DMACTRL to have WWR and WOP */
1912        tempval = gfar_read(&regs->dmactrl);
1913        tempval |= DMACTRL_INIT_SETTINGS;
1914        gfar_write(&regs->dmactrl, tempval);
1915
1916        /* Make sure we aren't stopped */
1917        tempval = gfar_read(&regs->dmactrl);
1918        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1919        gfar_write(&regs->dmactrl, tempval);
1920
1921        for (i = 0; i < priv->num_grps; i++) {
1922                regs = priv->gfargrp[i].regs;
1923                /* Clear THLT/RHLT, so that the DMA starts polling now */
1924                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1925                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1926        }
1927
1928        /* Enable Rx/Tx DMA */
1929        tempval = gfar_read(&regs->maccfg1);
1930        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1931        gfar_write(&regs->maccfg1, tempval);
1932
1933        gfar_ints_enable(priv);
1934
1935        priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1936}
1937
1938static void free_grp_irqs(struct gfar_priv_grp *grp)
1939{
1940        free_irq(gfar_irq(grp, TX)->irq, grp);
1941        free_irq(gfar_irq(grp, RX)->irq, grp);
1942        free_irq(gfar_irq(grp, ER)->irq, grp);
1943}
1944
1945static int register_grp_irqs(struct gfar_priv_grp *grp)
1946{
1947        struct gfar_private *priv = grp->priv;
1948        struct net_device *dev = priv->ndev;
1949        int err;
1950
1951        /* If the device has multiple interrupts, register for
1952         * them.  Otherwise, only register for the single interrupt.
1953         */
1954        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1955                /* Install our interrupt handlers for Error,
1956                 * Transmit, and Receive
1957                 */
1958                err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1959                                  gfar_irq(grp, ER)->name, grp);
1960                if (err < 0) {
1961                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1962                                  gfar_irq(grp, ER)->irq);
1963
1964                        goto err_irq_fail;
1965                }
1966                err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1967                                  gfar_irq(grp, TX)->name, grp);
1968                if (err < 0) {
1969                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1970                                  gfar_irq(grp, TX)->irq);
1971                        goto tx_irq_fail;
1972                }
1973                err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1974                                  gfar_irq(grp, RX)->name, grp);
1975                if (err < 0) {
1976                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1977                                  gfar_irq(grp, RX)->irq);
1978                        goto rx_irq_fail;
1979                }
1980        } else {
1981                err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1982                                  gfar_irq(grp, TX)->name, grp);
1983                if (err < 0) {
1984                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1985                                  gfar_irq(grp, TX)->irq);
1986                        goto err_irq_fail;
1987                }
1988        }
1989
1990        return 0;
1991
1992rx_irq_fail:
1993        free_irq(gfar_irq(grp, TX)->irq, grp);
1994tx_irq_fail:
1995        free_irq(gfar_irq(grp, ER)->irq, grp);
1996err_irq_fail:
1997        return err;
1998
1999}
2000
2001static void gfar_free_irq(struct gfar_private *priv)
2002{
2003        int i;
2004
2005        /* Free the IRQs */
2006        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2007                for (i = 0; i < priv->num_grps; i++)
2008                        free_grp_irqs(&priv->gfargrp[i]);
2009        } else {
2010                for (i = 0; i < priv->num_grps; i++)
2011                        free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2012                                 &priv->gfargrp[i]);
2013        }
2014}
2015
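    /* Request the IRQs for every interrupt group, releasing the groups
     * already registered if any request fails.
     */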
2016static int gfar_request_irq(struct gfar_private *priv)
2017{
2018        int err, i, j;
2019
2020        for (i = 0; i < priv->num_grps; i++) {
2021                err = register_grp_irqs(&priv->gfargrp[i]);
2022                if (err) {
2023                        for (j = 0; j < i; j++)
2024                                free_grp_irqs(&priv->gfargrp[j]);
2025                        return err;
2026                }
2027        }
2028
2029        return 0;
2030}
2031
2032/* Bring the controller up and running */
2033int startup_gfar(struct net_device *ndev)
2034{
2035        struct gfar_private *priv = netdev_priv(ndev);
2036        int err;
2037
2038        gfar_mac_reset(priv);
2039
2040        err = gfar_alloc_skb_resources(ndev);
2041        if (err)
2042                return err;
2043
2044        gfar_init_tx_rx_base(priv);
2045
2046        smp_mb__before_clear_bit();
2047        clear_bit(GFAR_DOWN, &priv->state);
2048        smp_mb__after_clear_bit();
2049
2050        /* Start Rx/Tx DMA and enable the interrupts */
2051        gfar_start(priv);
2052
2053        phy_start(priv->phydev);
2054
2055        enable_napi(priv);
2056
2057        netif_tx_wake_all_queues(ndev);
2058
2059        return 0;
2060}
2061
2062/* Called when something needs to use the ethernet device
2063 * Returns 0 for success.
2064 */
2065static int gfar_enet_open(struct net_device *dev)
2066{
2067        struct gfar_private *priv = netdev_priv(dev);
2068        int err;
2069
2070        err = init_phy(dev);
2071        if (err)
2072                return err;
2073
2074        err = gfar_request_irq(priv);
2075        if (err)
2076                return err;
2077
2078        err = startup_gfar(dev);
2079        if (err)
2080                return err;
2081
2082        device_set_wakeup_enable(&dev->dev, priv->wol_en);
2083
2084        return err;
2085}
2086
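    /* Push a zeroed Tx frame control block (FCB) onto the head of the skb */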
2087static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2088{
2089        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2090
2091        memset(fcb, 0, GMAC_FCB_LEN);
2092
2093        return fcb;
2094}
2095
2096static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2097                                    int fcb_length)
2098{
2099        /* If we're here, it's an IP packet with a TCP or UDP
2100         * payload.  We set it up for hardware checksumming, using the
2101         * pseudo-header checksum we provide.
2102         */
2103        u8 flags = TXFCB_DEFAULT;
2104
2105        /* Tell the controller what the protocol is
2106         * And provide the already calculated phcs
2107         */
2108        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2109                flags |= TXFCB_UDP;
2110                fcb->phcs = udp_hdr(skb)->check;
2111        } else
2112                fcb->phcs = tcp_hdr(skb)->check;
2113
2114        /* l3os is the distance between the start of the
2115         * frame (skb->data) and the start of the IP hdr.
2116         * l4os is the distance between the start of the
2117         * l3 hdr and the l4 hdr
2118         */
2119        fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2120        fcb->l4os = skb_network_header_len(skb);
2121
2122        fcb->flags = flags;
2123}
2124
2125inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2126{
2127        fcb->flags |= TXFCB_VLN;
2128        fcb->vlctl = vlan_tx_tag_get(skb);
2129}
2130
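    /* Advance a TxBD pointer by 'stride' descriptors, wrapping around the
     * end of the ring.
     */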
2131static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2132                                      struct txbd8 *base, int ring_size)
2133{
2134        struct txbd8 *new_bd = bdp + stride;
2135
2136        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2137}
2138
2139static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2140                                      int ring_size)
2141{
2142        return skip_txbd(bdp, 1, base, ring_size);
2143}
2144
2145/* eTSEC12: csum generation not supported for some fcb offsets */
2146static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2147                                       unsigned long fcb_addr)
2148{
2149        return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2150               (fcb_addr % 0x20) > 0x18);
2151}
2152
2153/* eTSEC76: csum generation for frames larger than 2500 may
2154 * cause excess delays before start of transmission
2155 */
2156static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2157                                       unsigned int len)
2158{
2159        return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2160               (len > 2500));
2161}
2162
2163/* This is called by the kernel when a frame is ready for transmission.
2164 * It is pointed to by the dev->hard_start_xmit function pointer
2165 */
2166static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2167{
2168        struct gfar_private *priv = netdev_priv(dev);
2169        struct gfar_priv_tx_q *tx_queue = NULL;
2170        struct netdev_queue *txq;
2171        struct gfar __iomem *regs = NULL;
2172        struct txfcb *fcb = NULL;
2173        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2174        u32 lstatus;
2175        int i, rq = 0;
2176        int do_tstamp, do_csum, do_vlan;
2177        u32 bufaddr;
2178        unsigned long flags;
2179        unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2180
2181        rq = skb->queue_mapping;
2182        tx_queue = priv->tx_queue[rq];
2183        txq = netdev_get_tx_queue(dev, rq);
2184        base = tx_queue->tx_bd_base;
2185        regs = tx_queue->grp->regs;
2186
2187        do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2188        do_vlan = vlan_tx_tag_present(skb);
2189        do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2190                    priv->hwts_tx_en;
2191
2192        if (do_csum || do_vlan)
2193                fcb_len = GMAC_FCB_LEN;
2194
2195        /* check if time stamp should be generated */
2196        if (unlikely(do_tstamp))
2197                fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2198
2199        /* make space for additional header when fcb is needed */
2200        if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2201                struct sk_buff *skb_new;
2202
2203                skb_new = skb_realloc_headroom(skb, fcb_len);
2204                if (!skb_new) {
2205                        dev->stats.tx_errors++;
2206                        dev_kfree_skb_any(skb);
2207                        return NETDEV_TX_OK;
2208                }
2209
2210                if (skb->sk)
2211                        skb_set_owner_w(skb_new, skb->sk);
2212                dev_consume_skb_any(skb);
2213                skb = skb_new;
2214        }
2215
2216        /* total number of fragments in the SKB */
2217        nr_frags = skb_shinfo(skb)->nr_frags;
2218
2219        /* calculate the required number of TxBDs for this skb */
2220        if (unlikely(do_tstamp))
2221                nr_txbds = nr_frags + 2;
2222        else
2223                nr_txbds = nr_frags + 1;
2224
2225        /* check if there is space to queue this packet */
2226        if (nr_txbds > tx_queue->num_txbdfree) {
2227                /* no space, stop the queue */
2228                netif_tx_stop_queue(txq);
2229                dev->stats.tx_fifo_errors++;
2230                return NETDEV_TX_BUSY;
2231        }
2232
2233        /* Update transmit stats */
2234        bytes_sent = skb->len;
2235        tx_queue->stats.tx_bytes += bytes_sent;
2236        /* keep Tx bytes on wire for BQL accounting */
2237        GFAR_CB(skb)->bytes_sent = bytes_sent;
2238        tx_queue->stats.tx_packets++;
2239
2240        txbdp = txbdp_start = tx_queue->cur_tx;
2241        lstatus = txbdp->lstatus;
2242
2243        /* Time stamp insertion requires one additional TxBD */
2244        if (unlikely(do_tstamp))
2245                txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2246                                                 tx_queue->tx_ring_size);
2247
2248        if (nr_frags == 0) {
2249                if (unlikely(do_tstamp))
2250                        txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2251                                                          TXBD_INTERRUPT);
2252                else
2253                        lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2254        } else {
2255                /* Place the fragment addresses and lengths into the TxBDs */
2256                for (i = 0; i < nr_frags; i++) {
2257                        unsigned int frag_len;
2258                        /* Point at the next BD, wrapping as needed */
2259                        txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2260
2261                        frag_len = skb_shinfo(skb)->frags[i].size;
2262
2263                        lstatus = txbdp->lstatus | frag_len |
2264                                  BD_LFLAG(TXBD_READY);
2265
2266                        /* Handle the last BD specially */
2267                        if (i == nr_frags - 1)
2268                                lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2269
2270                        bufaddr = skb_frag_dma_map(priv->dev,
2271                                                   &skb_shinfo(skb)->frags[i],
2272                                                   0,
2273                                                   frag_len,
2274                                                   DMA_TO_DEVICE);
2275
2276                        /* set the TxBD length and buffer pointer */
2277                        txbdp->bufPtr = bufaddr;
2278                        txbdp->lstatus = lstatus;
2279                }
2280
2281                lstatus = txbdp_start->lstatus;
2282        }
2283
2284        /* Add TxPAL between FCB and frame if required */
2285        if (unlikely(do_tstamp)) {
2286                skb_push(skb, GMAC_TXPAL_LEN);
2287                memset(skb->data, 0, GMAC_TXPAL_LEN);
2288        }
2289
2290        /* Add TxFCB if required */
2291        if (fcb_len) {
2292                fcb = gfar_add_fcb(skb);
2293                lstatus |= BD_LFLAG(TXBD_TOE);
2294        }
2295
2296        /* Set up checksumming */
2297        if (do_csum) {
2298                gfar_tx_checksum(skb, fcb, fcb_len);
2299
2300                if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2301                    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2302                        __skb_pull(skb, GMAC_FCB_LEN);
2303                        skb_checksum_help(skb);
2304                        if (do_vlan || do_tstamp) {
2305                                /* put back a new fcb for vlan/tstamp TOE */
2306                                fcb = gfar_add_fcb(skb);
2307                        } else {
2308                                /* Tx TOE not used */
2309                                lstatus &= ~(BD_LFLAG(TXBD_TOE));
2310                                fcb = NULL;
2311                        }
2312                }
2313        }
2314
2315        if (do_vlan)
2316                gfar_tx_vlan(skb, fcb);
2317
2318        /* Setup tx hardware time stamping if requested */
2319        if (unlikely(do_tstamp)) {
2320                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2321                fcb->ptp = 1;
2322        }
2323
2324        txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2325                                             skb_headlen(skb), DMA_TO_DEVICE);
2326
2327        /* If time stamping is requested, one additional TxBD must be set up. The
2328         * first TxBD points to the FCB and must have a data length of
2329         * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2330         * the full frame length.
2331         */
2332        if (unlikely(do_tstamp)) {
2333                txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2334                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2335                                         (skb_headlen(skb) - fcb_len);
2336                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2337        } else {
2338                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2339        }
2340
2341        netdev_tx_sent_queue(txq, bytes_sent);
2342
2343        /* We can work in parallel with gfar_clean_tx_ring(), except
2344         * when modifying num_txbdfree. Note that we didn't grab the lock
2345         * when we were reading the num_txbdfree and checking for available
2346         * space, that's because outside of this function it can only grow,
2347         * and once we've got needed space, it cannot suddenly disappear.
2348         *
2349         * The lock also protects us from gfar_error(), which can modify
2350         * regs->tstat and thus retrigger the transfers, which is why we
2351         * also must grab the lock before setting the ready bit on the
2352         * first BD to be transmitted.
2353         */
2354        spin_lock_irqsave(&tx_queue->txlock, flags);
2355
2356        /* The powerpc-specific eieio() is used, as wmb() has too strong
2357         * semantics (it requires synchronization between cacheable and
2358         * uncacheable mappings, which eieio doesn't provide and which we
2359         * don't need), thus requiring a more expensive sync instruction.  At
2360         * some point, the set of architecture-independent barrier functions
2361         * should be expanded to include weaker barriers.
2362         */
2363        eieio();
2364
2365        txbdp_start->lstatus = lstatus;
2366
2367        eieio(); /* force lstatus write before tx_skbuff */
2368
2369        tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2370
2371        /* Update the current skb pointer to the next entry we will use
2372         * (wrapping if necessary)
2373         */
2374        tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2375                              TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2376
2377        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2378
2379        /* reduce TxBD free count */
2380        tx_queue->num_txbdfree -= (nr_txbds);
2381
2382        /* If the next BD still needs to be cleaned up, then the bds
2383         * are full.  We need to tell the kernel to stop sending us stuff.
2384         */
2385        if (!tx_queue->num_txbdfree) {
2386                netif_tx_stop_queue(txq);
2387
2388                dev->stats.tx_fifo_errors++;
2389        }
2390
2391        /* Tell the DMA to go go go */
2392        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2393
2394        /* Unlock priv */
2395        spin_unlock_irqrestore(&tx_queue->txlock, flags);
2396
2397        return NETDEV_TX_OK;
2398}
2399
2400/* Stops the kernel queue, and halts the controller */
2401static int gfar_close(struct net_device *dev)
2402{
2403        struct gfar_private *priv = netdev_priv(dev);
2404
2405        cancel_work_sync(&priv->reset_task);
2406        stop_gfar(dev);
2407
2408        /* Disconnect from the PHY */
2409        phy_disconnect(priv->phydev);
2410        priv->phydev = NULL;
2411
2412        gfar_free_irq(priv);
2413
2414        return 0;
2415}
2416
2417/* Changes the mac address if the controller is not running. */
2418static int gfar_set_mac_address(struct net_device *dev)
2419{
2420        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2421
2422        return 0;
2423}
2424
2425static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2426{
2427        struct gfar_private *priv = netdev_priv(dev);
2428        int frame_size = new_mtu + ETH_HLEN;
2429
2430        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2431                netif_err(priv, drv, dev, "Invalid MTU setting\n");
2432                return -EINVAL;
2433        }
2434
2435        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2436                cpu_relax();
2437
2438        if (dev->flags & IFF_UP)
2439                stop_gfar(dev);
2440
2441        dev->mtu = new_mtu;
2442
2443        if (dev->flags & IFF_UP)
2444                startup_gfar(dev);
2445
2446        clear_bit_unlock(GFAR_RESETTING, &priv->state);
2447
2448        return 0;
2449}
2450
2451void reset_gfar(struct net_device *ndev)
2452{
2453        struct gfar_private *priv = netdev_priv(ndev);
2454
2455        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2456                cpu_relax();
2457
2458        stop_gfar(ndev);
2459        startup_gfar(ndev);
2460
2461        clear_bit_unlock(GFAR_RESETTING, &priv->state);
2462}
2463
2464/* gfar_reset_task gets scheduled when a packet has not been
2465 * transmitted after a set amount of time.
2466 * For now, assume that clearing out all the structures, and
2467 * starting over will fix the problem.
2468 */
2469static void gfar_reset_task(struct work_struct *work)
2470{
2471        struct gfar_private *priv = container_of(work, struct gfar_private,
2472                                                 reset_task);
2473        reset_gfar(priv->ndev);
2474}
2475
2476static void gfar_timeout(struct net_device *dev)
2477{
2478        struct gfar_private *priv = netdev_priv(dev);
2479
2480        dev->stats.tx_errors++;
2481        schedule_work(&priv->reset_task);
2482}
2483
2484static void gfar_align_skb(struct sk_buff *skb)
2485{
2486        /* We need the data buffer to be aligned properly.  We will reserve
2487         * as many bytes as needed to achieve that alignment.
2488         */
2489        skb_reserve(skb, RXBUF_ALIGNMENT -
2490                    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2491}
2492
2493/* Reclaim completed Tx descriptors (called from the NAPI Tx poll routines) */
2494static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2495{
2496        struct net_device *dev = tx_queue->dev;
2497        struct netdev_queue *txq;
2498        struct gfar_private *priv = netdev_priv(dev);
2499        struct txbd8 *bdp, *next = NULL;
2500        struct txbd8 *lbdp = NULL;
2501        struct txbd8 *base = tx_queue->tx_bd_base;
2502        struct sk_buff *skb;
2503        int skb_dirtytx;
2504        int tx_ring_size = tx_queue->tx_ring_size;
2505        int frags = 0, nr_txbds = 0;
2506        int i;
2507        int howmany = 0;
2508        int tqi = tx_queue->qindex;
2509        unsigned int bytes_sent = 0;
2510        u32 lstatus;
2511        size_t buflen;
2512
2513        txq = netdev_get_tx_queue(dev, tqi);
2514        bdp = tx_queue->dirty_tx;
2515        skb_dirtytx = tx_queue->skb_dirtytx;
2516
2517        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2518                unsigned long flags;
2519
2520                frags = skb_shinfo(skb)->nr_frags;
2521
2522                /* When time stamping, one additional TxBD must be freed.
2523                 * Also, we need to dma_unmap_single() the TxPAL.
2524                 */
2525                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2526                        nr_txbds = frags + 2;
2527                else
2528                        nr_txbds = frags + 1;
2529
2530                lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2531
2532                lstatus = lbdp->lstatus;
2533
2534                /* Only clean completed frames */
2535                if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2536                    (lstatus & BD_LENGTH_MASK))
2537                        break;
2538
2539                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2540                        next = next_txbd(bdp, base, tx_ring_size);
2541                        buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2542                } else
2543                        buflen = bdp->length;
2544
2545                dma_unmap_single(priv->dev, bdp->bufPtr,
2546                                 buflen, DMA_TO_DEVICE);
2547
2548                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2549                        struct skb_shared_hwtstamps shhwtstamps;
2550                        u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
2551
2552                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2553                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2554                        skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2555                        skb_tstamp_tx(skb, &shhwtstamps);
2556                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2557                        bdp = next;
2558                }
2559
2560                bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2561                bdp = next_txbd(bdp, base, tx_ring_size);
2562
2563                for (i = 0; i < frags; i++) {
2564                        dma_unmap_page(priv->dev, bdp->bufPtr,
2565                                       bdp->length, DMA_TO_DEVICE);
2566                        bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2567                        bdp = next_txbd(bdp, base, tx_ring_size);
2568                }
2569
2570                bytes_sent += GFAR_CB(skb)->bytes_sent;
2571
2572                dev_kfree_skb_any(skb);
2573
2574                tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2575
2576                skb_dirtytx = (skb_dirtytx + 1) &
2577                              TX_RING_MOD_MASK(tx_ring_size);
2578
2579                howmany++;
2580                spin_lock_irqsave(&tx_queue->txlock, flags);
2581                tx_queue->num_txbdfree += nr_txbds;
2582                spin_unlock_irqrestore(&tx_queue->txlock, flags);
2583        }
2584
2585        /* If we freed a buffer, we can restart transmission, if necessary */
2586        if (tx_queue->num_txbdfree &&
2587            netif_tx_queue_stopped(txq) &&
2588            !(test_bit(GFAR_DOWN, &priv->state)))
2589                netif_wake_subqueue(priv->ndev, tqi);
2590
2591        /* Update dirty indicators */
2592        tx_queue->skb_dirtytx = skb_dirtytx;
2593        tx_queue->dirty_tx = bdp;
2594
2595        netdev_tx_completed_queue(txq, howmany, bytes_sent);
2596}
2597
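    /* Map the new skb's data buffer for DMA and point the RxBD at it */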
2598static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2599                           struct sk_buff *skb)
2600{
2601        struct net_device *dev = rx_queue->dev;
2602        struct gfar_private *priv = netdev_priv(dev);
2603        dma_addr_t buf;
2604
2605        buf = dma_map_single(priv->dev, skb->data,
2606                             priv->rx_buffer_size, DMA_FROM_DEVICE);
2607        gfar_init_rxbdp(rx_queue, bdp, buf);
2608}
2609
2610static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2611{
2612        struct gfar_private *priv = netdev_priv(dev);
2613        struct sk_buff *skb;
2614
2615        skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2616        if (!skb)
2617                return NULL;
2618
2619        gfar_align_skb(skb);
2620
2621        return skb;
2622}
2623
2624struct sk_buff *gfar_new_skb(struct net_device *dev)
2625{
2626        return gfar_alloc_skb(dev);
2627}
2628
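    /* Update the Rx error counters according to the RxBD status bits */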
2629static inline void count_errors(unsigned short status, struct net_device *dev)
2630{
2631        struct gfar_private *priv = netdev_priv(dev);
2632        struct net_device_stats *stats = &dev->stats;
2633        struct gfar_extra_stats *estats = &priv->extra_stats;
2634
2635        /* If the packet was truncated, none of the other errors matter */
2636        if (status & RXBD_TRUNCATED) {
2637                stats->rx_length_errors++;
2638
2639                atomic64_inc(&estats->rx_trunc);
2640
2641                return;
2642        }
2643        /* Count the errors, if there were any */
2644        if (status & (RXBD_LARGE | RXBD_SHORT)) {
2645                stats->rx_length_errors++;
2646
2647                if (status & RXBD_LARGE)
2648                        atomic64_inc(&estats->rx_large);
2649                else
2650                        atomic64_inc(&estats->rx_short);
2651        }
2652        if (status & RXBD_NONOCTET) {
2653                stats->rx_frame_errors++;
2654                atomic64_inc(&estats->rx_nonoctet);
2655        }
2656        if (status & RXBD_CRCERR) {
2657                atomic64_inc(&estats->rx_crcerr);
2658                stats->rx_crc_errors++;
2659        }
2660        if (status & RXBD_OVERRUN) {
2661                atomic64_inc(&estats->rx_overrun);
2662                stats->rx_crc_errors++;
2663        }
2664}
2665
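    /* Rx interrupt handler: mask further Rx interrupts and schedule the
     * Rx NAPI context to do the actual ring processing.
     */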
2666irqreturn_t gfar_receive(int irq, void *grp_id)
2667{
2668        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2669        unsigned long flags;
2670        u32 imask;
2671
2672        if (likely(napi_schedule_prep(&grp->napi_rx))) {
2673                spin_lock_irqsave(&grp->grplock, flags);
2674                imask = gfar_read(&grp->regs->imask);
2675                imask &= IMASK_RX_DISABLED;
2676                gfar_write(&grp->regs->imask, imask);
2677                spin_unlock_irqrestore(&grp->grplock, flags);
2678                __napi_schedule(&grp->napi_rx);
2679        } else {
2680                /* Clear IEVENT, so interrupts aren't called again
2681                 * because of the packets that have already arrived.
2682                 */
2683                gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2684        }
2685
2686        return IRQ_HANDLED;
2687}
2688
2689/* Interrupt Handler for Transmit complete */
2690static irqreturn_t gfar_transmit(int irq, void *grp_id)
2691{
2692        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2693        unsigned long flags;
2694        u32 imask;
2695
2696        if (likely(napi_schedule_prep(&grp->napi_tx))) {
2697                spin_lock_irqsave(&grp->grplock, flags);
2698                imask = gfar_read(&grp->regs->imask);
2699                imask &= IMASK_TX_DISABLED;
2700                gfar_write(&grp->regs->imask, imask);
2701                spin_unlock_irqrestore(&grp->grplock, flags);
2702                __napi_schedule(&grp->napi_tx);
2703        } else {
2704                /* Clear IEVENT, so interrupts aren't called again
2705                 * because of the packets that have already arrived.
2706                 */
2707                gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2708        }
2709
2710        return IRQ_HANDLED;
2711}
2712
2713static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2714{
2715        /* If valid headers were found, and valid sums
2716         * were verified, then we tell the kernel that no
2717         * checksumming is necessary.  Otherwise, the stack must verify it.
2718         */
2719        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2720                skb->ip_summed = CHECKSUM_UNNECESSARY;
2721        else
2722                skb_checksum_none_assert(skb);
2723}
2724
2725
2726/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2727static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2728                               int amount_pull, struct napi_struct *napi)
2729{
2730        struct gfar_private *priv = netdev_priv(dev);
2731        struct rxfcb *fcb = NULL;
2732
2733        /* The FCB, if present, is at the beginning of the frame */
2734        fcb = (struct rxfcb *)skb->data;
2735
2736        /* Remove the FCB from the skb
2737         * Remove the padded bytes, if there are any
2738         */
2739        if (amount_pull) {
2740                skb_record_rx_queue(skb, fcb->rq);
2741                skb_pull(skb, amount_pull);
2742        }
2743
2744        /* Get receive timestamp from the skb */
2745        if (priv->hwts_rx_en) {
2746                struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2747                u64 *ns = (u64 *) skb->data;
2748
2749                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2750                shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2751        }
2752
2753        if (priv->padding)
2754                skb_pull(skb, priv->padding);
2755
2756        if (dev->features & NETIF_F_RXCSUM)
2757                gfar_rx_checksum(skb, fcb);
2758
2759        /* Tell the skb what kind of packet this is */
2760        skb->protocol = eth_type_trans(skb, dev);
2761
2762        /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here because,
2763         * even if vlan rx accel is disabled, on some chips
2764         * RXFCB_VLN is pseudo-randomly set.
2765         */
2766        if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2767            fcb->flags & RXFCB_VLN)
2768                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2769
2770        /* Send the packet up the stack */
2771        napi_gro_receive(napi, skb);
2772
2773}
2774
2775/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2776 * until the budget/quota has been reached. Returns the number
2777 * of frames handled
2778 */
2779int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2780{
2781        struct net_device *dev = rx_queue->dev;
2782        struct rxbd8 *bdp, *base;
2783        struct sk_buff *skb;
2784        int pkt_len;
2785        int amount_pull;
2786        int howmany = 0;
2787        struct gfar_private *priv = netdev_priv(dev);
2788
2789        /* Get the first full descriptor */
2790        bdp = rx_queue->cur_rx;
2791        base = rx_queue->rx_bd_base;
2792
2793        amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2794
2795        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2796                struct sk_buff *newskb;
2797
2798                rmb();
2799
2800                /* Add another skb for the future */
2801                newskb = gfar_new_skb(dev);
2802
2803                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2804
2805                dma_unmap_single(priv->dev, bdp->bufPtr,
2806                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
2807
2808                if (unlikely(!(bdp->status & RXBD_ERR) &&
2809                             bdp->length > priv->rx_buffer_size))
2810                        bdp->status = RXBD_LARGE;
2811
2812                /* We drop the frame if we failed to allocate a new buffer */
2813                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2814                             bdp->status & RXBD_ERR)) {
2815                        count_errors(bdp->status, dev);
2816
2817                        if (unlikely(!newskb))
2818                                newskb = skb;
2819                        else if (skb)
2820                                dev_kfree_skb(skb);
2821                } else {
2822                        /* Increment the number of packets */
2823                        rx_queue->stats.rx_packets++;
2824                        howmany++;
2825
2826                        if (likely(skb)) {
2827                                pkt_len = bdp->length - ETH_FCS_LEN;
2828                                /* Remove the FCS from the packet length */
2829                                skb_put(skb, pkt_len);
2830                                rx_queue->stats.rx_bytes += pkt_len;
2831                                skb_record_rx_queue(skb, rx_queue->qindex);
2832                                gfar_process_frame(dev, skb, amount_pull,
2833                                                   &rx_queue->grp->napi_rx);
2834
2835                        } else {
2836                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
2837                                rx_queue->stats.rx_dropped++;
2838                                atomic64_inc(&priv->extra_stats.rx_skbmissing);
2839                        }
2840
2841                }
2842
2843                rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2844
2845                /* Setup the new bdp */
2846                gfar_new_rxbdp(rx_queue, bdp, newskb);
2847
2848                /* Update to the next pointer */
2849                bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2850
2851                /* update to point at the next skb */
2852                rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2853                                      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2854        }
2855
2856        /* Update the current rxbd pointer to be the next one */
2857        rx_queue->cur_rx = bdp;
2858
2859        return howmany;
2860}
2861
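    /* NAPI Rx poll for single-queue mode: clean the Rx ring up to 'budget'
     * frames, then re-enable Rx interrupts if all the work was done.
     */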
2862static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2863{
2864        struct gfar_priv_grp *gfargrp =
2865                container_of(napi, struct gfar_priv_grp, napi_rx);
2866        struct gfar __iomem *regs = gfargrp->regs;
2867        struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2868        int work_done = 0;
2869
2870        /* Clear IEVENT, so interrupts aren't called again
2871         * because of the packets that have already arrived
2872         */
2873        gfar_write(&regs->ievent, IEVENT_RX_MASK);
2874
2875        work_done = gfar_clean_rx_ring(rx_queue, budget);
2876
2877        if (work_done < budget) {
2878                u32 imask;
2879                napi_complete(napi);
2880                /* Clear the halt bit in RSTAT */
2881                gfar_write(&regs->rstat, gfargrp->rstat);
2882
2883                spin_lock_irq(&gfargrp->grplock);
2884                imask = gfar_read(&regs->imask);
2885                imask |= IMASK_RX_DEFAULT;
2886                gfar_write(&regs->imask, imask);
2887                spin_unlock_irq(&gfargrp->grplock);
2888        }
2889
2890        return work_done;
2891}
2892
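    /* NAPI Tx poll for single-queue mode: run Tx cleanup to completion and
     * re-enable Tx interrupts.
     */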
2893static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2894{
2895        struct gfar_priv_grp *gfargrp =
2896                container_of(napi, struct gfar_priv_grp, napi_tx);
2897        struct gfar __iomem *regs = gfargrp->regs;
2898        struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2899        u32 imask;
2900
2901        /* Clear IEVENT, so interrupts aren't called again
2902         * because of the packets that have already arrived
2903         */
2904        gfar_write(&regs->ievent, IEVENT_TX_MASK);
2905
2906        /* run Tx cleanup to completion */
2907        if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2908                gfar_clean_tx_ring(tx_queue);
2909
2910        napi_complete(napi);
2911
2912        spin_lock_irq(&gfargrp->grplock);
2913        imask = gfar_read(&regs->imask);
2914        imask |= IMASK_TX_DEFAULT;
2915        gfar_write(&regs->imask, imask);
2916        spin_unlock_irq(&gfargrp->grplock);
2917
2918        return 0;
2919}
2920
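    /* NAPI Rx poll for multi-queue groups: split the budget across the
     * active Rx queues and re-enable Rx interrupts once all are drained.
     */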
2921static int gfar_poll_rx(struct napi_struct *napi, int budget)
2922{
2923        struct gfar_priv_grp *gfargrp =
2924                container_of(napi, struct gfar_priv_grp, napi_rx);
2925        struct gfar_private *priv = gfargrp->priv;
2926        struct gfar __iomem *regs = gfargrp->regs;
2927        struct gfar_priv_rx_q *rx_queue = NULL;
2928        int work_done = 0, work_done_per_q = 0;
2929        int i, budget_per_q = 0;
2930        unsigned long rstat_rxf;
2931        int num_act_queues;
2932
2933        /* Clear IEVENT, so interrupts aren't called again
2934         * because of the packets that have already arrived
2935         */
2936        gfar_write(&regs->ievent, IEVENT_RX_MASK);
2937
2938        rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2939
2940        num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2941        if (num_act_queues)
2942                budget_per_q = budget/num_act_queues;
2943
2944        for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2945                /* skip queue if not active */
2946                if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2947                        continue;
2948
2949                rx_queue = priv->rx_queue[i];
2950                work_done_per_q =
2951                        gfar_clean_rx_ring(rx_queue, budget_per_q);
2952                work_done += work_done_per_q;
2953
2954                /* finished processing this queue */
2955                if (work_done_per_q < budget_per_q) {
2956                        /* clear active queue hw indication */
2957                        gfar_write(&regs->rstat,
2958                                   RSTAT_CLEAR_RXF0 >> i);
2959                        num_act_queues--;
2960
2961                        if (!num_act_queues)
2962                                break;
2963                }
2964        }
2965
2966        if (!num_act_queues) {
2967                u32 imask;
2968                napi_complete(napi);
2969
2970                /* Clear the halt bit in RSTAT */
2971                gfar_write(&regs->rstat, gfargrp->rstat);
2972
2973                spin_lock_irq(&gfargrp->grplock);
2974                imask = gfar_read(&regs->imask);
2975                imask |= IMASK_RX_DEFAULT;
2976                gfar_write(&regs->imask, imask);
2977                spin_unlock_irq(&gfargrp->grplock);
2978        }
2979
2980        return work_done;
2981}
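
/* Example of the budget split above: with a NAPI budget of 64 and,
 * say, three RX queues flagged active in RSTAT, each queue is polled
 * with budget_per_q = 64 / 3 = 21 frames.  A queue that consumes less
 * than its share is marked done (its RSTAT RXF bit is cleared); if any
 * queue is left active, napi_complete() is skipped and the group is
 * polled again on the next pass.
 */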
2982
2983static int gfar_poll_tx(struct napi_struct *napi, int budget)
2984{
2985        struct gfar_priv_grp *gfargrp =
2986                container_of(napi, struct gfar_priv_grp, napi_tx);
2987        struct gfar_private *priv = gfargrp->priv;
2988        struct gfar __iomem *regs = gfargrp->regs;
2989        struct gfar_priv_tx_q *tx_queue = NULL;
2990        int has_tx_work = 0;
2991        int i;
2992
2993        /* Clear IEVENT, so interrupts aren't called again
2994         * because of the packets that have already been handled
2995         */
2996        gfar_write(&regs->ievent, IEVENT_TX_MASK);
2997
2998        for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2999                tx_queue = priv->tx_queue[i];
3000                /* run Tx cleanup to completion */
3001                if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3002                        gfar_clean_tx_ring(tx_queue);
3003                        has_tx_work = 1;
3004                }
3005        }
3006
3007        if (!has_tx_work) {
3008                u32 imask;
3009                napi_complete(napi);
3010
3011                spin_lock_irq(&gfargrp->grplock);
3012                imask = gfar_read(&regs->imask);
3013                imask |= IMASK_TX_DEFAULT;
3014                gfar_write(&regs->imask, imask);
3015                spin_unlock_irq(&gfargrp->grplock);
3016        }
3017
3018        return 0;
3019}
3020
3021
3022#ifdef CONFIG_NET_POLL_CONTROLLER
3023/* Polling 'interrupt' - used by things like netconsole to send skbs
3024 * without having to re-enable interrupts. It's not called while
3025 * the interrupt routine is executing.
3026 */
3027static void gfar_netpoll(struct net_device *dev)
3028{
3029        struct gfar_private *priv = netdev_priv(dev);
3030        int i;
3031
3032        /* If the device has multiple interrupts, run tx/rx */
3033        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3034                for (i = 0; i < priv->num_grps; i++) {
3035                        struct gfar_priv_grp *grp = &priv->gfargrp[i];
3036
3037                        disable_irq(gfar_irq(grp, TX)->irq);
3038                        disable_irq(gfar_irq(grp, RX)->irq);
3039                        disable_irq(gfar_irq(grp, ER)->irq);
3040                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3041                        enable_irq(gfar_irq(grp, ER)->irq);
3042                        enable_irq(gfar_irq(grp, RX)->irq);
3043                        enable_irq(gfar_irq(grp, TX)->irq);
3044                }
3045        } else {
3046                for (i = 0; i < priv->num_grps; i++) {
3047                        struct gfar_priv_grp *grp = &priv->gfargrp[i];
3048
3049                        disable_irq(gfar_irq(grp, TX)->irq);
3050                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3051                        enable_irq(gfar_irq(grp, TX)->irq);
3052                }
3053        }
3054}
3055#endif
3056
3057/* The interrupt handler for devices with one interrupt */
3058static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3059{
3060        struct gfar_priv_grp *gfargrp = grp_id;
3061
3062        /* Save ievent for future reference */
3063        u32 events = gfar_read(&gfargrp->regs->ievent);
3064
3065        /* Check for reception */
3066        if (events & IEVENT_RX_MASK)
3067                gfar_receive(irq, grp_id);
3068
3069        /* Check for transmit completion */
3070        if (events & IEVENT_TX_MASK)
3071                gfar_transmit(irq, grp_id);
3072
3073        /* Check for errors */
3074        if (events & IEVENT_ERR_MASK)
3075                gfar_error(irq, grp_id);
3076
3077        return IRQ_HANDLED;
3078}
3079
3080/* Called every time the controller might need to be made
3081 * aware of new link state.  The PHY code conveys this
3082 * information through variables in the phydev structure, and this
3083 * function converts those variables into the appropriate
3084 * register values, and can bring down the device if needed.
3085 */
3086static void adjust_link(struct net_device *dev)
3087{
3088        struct gfar_private *priv = netdev_priv(dev);
3089        struct phy_device *phydev = priv->phydev;
3090
3091        if (unlikely(phydev->link != priv->oldlink ||
3092                     phydev->duplex != priv->oldduplex ||
3093                     phydev->speed != priv->oldspeed))
3094                gfar_update_link_state(priv);
3095}
3096
3097/* Update the hash table based on the current list of multicast
3098 * addresses we subscribe to.  Also, change the promiscuity of
3099 * the device based on the flags (this function is called
3100 * whenever dev->flags changes)
3101 */
3102static void gfar_set_multi(struct net_device *dev)
3103{
3104        struct netdev_hw_addr *ha;
3105        struct gfar_private *priv = netdev_priv(dev);
3106        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3107        u32 tempval;
3108
3109        if (dev->flags & IFF_PROMISC) {
3110                /* Set RCTRL to PROM */
3111                tempval = gfar_read(&regs->rctrl);
3112                tempval |= RCTRL_PROM;
3113                gfar_write(&regs->rctrl, tempval);
3114        } else {
3115                /* Set RCTRL to not PROM */
3116                tempval = gfar_read(&regs->rctrl);
3117                tempval &= ~(RCTRL_PROM);
3118                gfar_write(&regs->rctrl, tempval);
3119        }
3120
3121        if (dev->flags & IFF_ALLMULTI) {
3122                /* Set the hash to rx all multicast frames */
3123                gfar_write(&regs->igaddr0, 0xffffffff);
3124                gfar_write(&regs->igaddr1, 0xffffffff);
3125                gfar_write(&regs->igaddr2, 0xffffffff);
3126                gfar_write(&regs->igaddr3, 0xffffffff);
3127                gfar_write(&regs->igaddr4, 0xffffffff);
3128                gfar_write(&regs->igaddr5, 0xffffffff);
3129                gfar_write(&regs->igaddr6, 0xffffffff);
3130                gfar_write(&regs->igaddr7, 0xffffffff);
3131                gfar_write(&regs->gaddr0, 0xffffffff);
3132                gfar_write(&regs->gaddr1, 0xffffffff);
3133                gfar_write(&regs->gaddr2, 0xffffffff);
3134                gfar_write(&regs->gaddr3, 0xffffffff);
3135                gfar_write(&regs->gaddr4, 0xffffffff);
3136                gfar_write(&regs->gaddr5, 0xffffffff);
3137                gfar_write(&regs->gaddr6, 0xffffffff);
3138                gfar_write(&regs->gaddr7, 0xffffffff);
3139        } else {
3140                int em_num;
3141                int idx;
3142
3143                /* zero out the hash */
3144                gfar_write(&regs->igaddr0, 0x0);
3145                gfar_write(&regs->igaddr1, 0x0);
3146                gfar_write(&regs->igaddr2, 0x0);
3147                gfar_write(&regs->igaddr3, 0x0);
3148                gfar_write(&regs->igaddr4, 0x0);
3149                gfar_write(&regs->igaddr5, 0x0);
3150                gfar_write(&regs->igaddr6, 0x0);
3151                gfar_write(&regs->igaddr7, 0x0);
3152                gfar_write(&regs->gaddr0, 0x0);
3153                gfar_write(&regs->gaddr1, 0x0);
3154                gfar_write(&regs->gaddr2, 0x0);
3155                gfar_write(&regs->gaddr3, 0x0);
3156                gfar_write(&regs->gaddr4, 0x0);
3157                gfar_write(&regs->gaddr5, 0x0);
3158                gfar_write(&regs->gaddr6, 0x0);
3159                gfar_write(&regs->gaddr7, 0x0);
3160
3161                /* If we have extended hash tables, we need to
3162                 * clear the exact match registers to prepare for
3163                 * setting them
3164                 */
3165                if (priv->extended_hash) {
3166                        em_num = GFAR_EM_NUM + 1;
3167                        gfar_clear_exact_match(dev);
3168                        idx = 1;
3169                } else {
3170                        idx = 0;
3171                        em_num = 0;
3172                }
3173
3174                if (netdev_mc_empty(dev))
3175                        return;
3176
3177                /* Parse the list, and set the appropriate bits */
3178                netdev_for_each_mc_addr(ha, dev) {
3179                        if (idx < em_num) {
3180                                gfar_set_mac_for_addr(dev, idx, ha->addr);
3181                                idx++;
3182                        } else
3183                                gfar_set_hash_for_addr(dev, ha->addr);
3184                }
3185        }
3186}
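
/* Note on the non-promiscuous path above: when extended hashing is
 * available the driver reserves the exact-match MAC address registers
 * (indices 1..GFAR_EM_NUM; index 0 holds the station address) for the
 * first multicast addresses on the list, and only falls back to
 * gfar_set_hash_for_addr() once those slots are used up.  If the list
 * fits entirely in the exact-match slots, the group hash tables are
 * left all-zero.
 */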
3187
3188
3189/* Clears each of the exact match registers to zero, so they
3190 * don't interfere with normal reception
3191 */
3192static void gfar_clear_exact_match(struct net_device *dev)
3193{
3194        int idx;
3195        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3196
3197        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3198                gfar_set_mac_for_addr(dev, idx, zero_arr);
3199}
3200
3201/* Set the appropriate hash bit for the given addr */
3202/* The algorithm works like so:
3203 * 1) Take the Destination Address (i.e. the multicast address), and
3204 * do a CRC on it (little endian), and reverse the bits of the
3205 * result.
3206 * 2) Use the 8 most significant bits as a hash into a 256-entry
3207 * table.  The table is controlled through 8 32-bit registers:
3208 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3209 * entry 255.  This means the 3 most significant bits of the
3210 * hash index select which gaddr register to use, and the other
3211 * 5 bits indicate which bit (assuming an IBM numbering scheme,
3212 * which for PowerPC (tm) is usually the case) in that register
3213 * holds the entry.  A worked example follows the function below.
3214 */
3215static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3216{
3217        u32 tempval;
3218        struct gfar_private *priv = netdev_priv(dev);
3219        u32 result = ether_crc(ETH_ALEN, addr);
3220        int width = priv->hash_width;
3221        u8 whichbit = (result >> (32 - width)) & 0x1f;
3222        u8 whichreg = result >> (32 - width + 5);
3223        u32 value = (1 << (31-whichbit));
3224
3225        tempval = gfar_read(priv->hash_regs[whichreg]);
3226        tempval |= value;
3227        gfar_write(priv->hash_regs[whichreg], tempval);
3228}
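
/* Worked example for the hashing above (illustrative only, assuming the
 * non-extended 8-bit hash width): if ether_crc() of the address yields
 * 0xb4000000, the top 8 bits are 0xb4 = 0b10110100.  The top 3 bits
 * (0b101 = 5) select gaddr5, the remaining 5 bits (0b10100 = 20) pick
 * bit 20 in IBM numbering, so value = 1 << (31 - 20) is OR-ed into
 * that register, exactly as gfar_set_hash_for_addr() does.
 */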
3229
3230
3231/* There are multiple MAC Address register pairs on some controllers
3232 * This function sets the pair selected by num to the given address
3233 */
3234static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3235                                  const u8 *addr)
3236{
3237        struct gfar_private *priv = netdev_priv(dev);
3238        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3239        int idx;
3240        u8 tmpbuf[2 * sizeof(u32)] = { 0 };
3241        u32 tempval;
3242        u32 __iomem *macptr = &regs->macstnaddr1;
3243
3244        macptr += num*2;
3245
3246        /* Copy the address in reversed byte order (little endian);
3247         * tmpbuf is zero-padded so the 32-bit reads stay in bounds
3248         */
3249        for (idx = 0; idx < ETH_ALEN; idx++)
3250                tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3251
3252        gfar_write(macptr, *((u32 *) (tmpbuf)));
3253
3254        tempval = *((u32 *) (tmpbuf + 4));
3255
3256        gfar_write(macptr+1, tempval);
3257}
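
/* An illustrative sketch, kept out of the build with #if 0; the helper
 * name gfar_set_mac_for_addr_sketch is hypothetical.  It shows the same
 * register layout written without the temporary buffer: for the address
 * 12:34:56:78:ab:cd this produces MACnADDR1 = 0xcdab7856 and
 * MACnADDR2 = 0x34120000, matching what the loop above generates on the
 * big-endian CPUs this driver targets.
 */
#if 0
static void gfar_set_mac_for_addr_sketch(struct net_device *dev, int num,
                                         const u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *macptr = &regs->macstnaddr1 + num * 2;

        /* low four bytes of the address, byte-reversed */
        gfar_write(macptr, (addr[5] << 24) | (addr[4] << 16) |
                           (addr[3] << 8)  |  addr[2]);
        /* top two bytes of the address; the rest of the register is 0 */
        gfar_write(macptr + 1, (addr[1] << 24) | (addr[0] << 16));
}
#endif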
3258
3259/* GFAR error interrupt handler */
3260static irqreturn_t gfar_error(int irq, void *grp_id)
3261{
3262        struct gfar_priv_grp *gfargrp = grp_id;
3263        struct gfar __iomem *regs = gfargrp->regs;
3264        struct gfar_private *priv = gfargrp->priv;
3265        struct net_device *dev = priv->ndev;
3266
3267        /* Save ievent for future reference */
3268        u32 events = gfar_read(&regs->ievent);
3269
3270        /* Clear IEVENT */
3271        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3272
3273        /* Magic Packet is not an error. */
3274        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3275            (events & IEVENT_MAG))
3276                events &= ~IEVENT_MAG;
3277
3278        /* Log error interrupts when debug messaging is enabled */
3279        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3280                netdev_dbg(dev,
3281                           "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3282                           events, gfar_read(&regs->imask));
3283
3284        /* Update the error counters */
3285        if (events & IEVENT_TXE) {
3286                dev->stats.tx_errors++;
3287
3288                if (events & IEVENT_LC)
3289                        dev->stats.tx_window_errors++;
3290                if (events & IEVENT_CRL)
3291                        dev->stats.tx_aborted_errors++;
3292                if (events & IEVENT_XFUN) {
3293                        unsigned long flags;
3294
3295                        netif_dbg(priv, tx_err, dev,
3296                                  "TX FIFO underrun, packet dropped\n");
3297                        dev->stats.tx_dropped++;
3298                        atomic64_inc(&priv->extra_stats.tx_underrun);
3299
3300                        local_irq_save(flags);
3301                        lock_tx_qs(priv);
3302
3303                        /* Reactivate the Tx Queues */
3304                        gfar_write(&regs->tstat, gfargrp->tstat);
3305
3306                        unlock_tx_qs(priv);
3307                        local_irq_restore(flags);
3308                }
3309                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3310        }
3311        if (events & IEVENT_BSY) {
3312                dev->stats.rx_errors++;
3313                atomic64_inc(&priv->extra_stats.rx_bsy);
3314
3315                gfar_receive(irq, grp_id);
3316
3317                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3318                          gfar_read(&regs->rstat));
3319        }
3320        if (events & IEVENT_BABR) {
3321                dev->stats.rx_errors++;
3322                atomic64_inc(&priv->extra_stats.rx_babr);
3323
3324                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3325        }
3326        if (events & IEVENT_EBERR) {
3327                atomic64_inc(&priv->extra_stats.eberr);
3328                netif_dbg(priv, rx_err, dev, "bus error\n");
3329        }
3330        if (events & IEVENT_RXC)
3331                netif_dbg(priv, rx_status, dev, "control frame\n");
3332
3333        if (events & IEVENT_BABT) {
3334                atomic64_inc(&priv->extra_stats.tx_babt);
3335                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3336        }
3337        return IRQ_HANDLED;
3338}
3339
3340static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3341{
3342        struct phy_device *phydev = priv->phydev;
3343        u32 val = 0;
3344
3345        if (!phydev->duplex)
3346                return val;
3347
3348        if (!priv->pause_aneg_en) {
3349                if (priv->tx_pause_en)
3350                        val |= MACCFG1_TX_FLOW;
3351                if (priv->rx_pause_en)
3352                        val |= MACCFG1_RX_FLOW;
3353        } else {
3354                u16 lcl_adv, rmt_adv;
3355                u8 flowctrl;
3356                /* get link partner capabilities */
3357                rmt_adv = 0;
3358                if (phydev->pause)
3359                        rmt_adv = LPA_PAUSE_CAP;
3360                if (phydev->asym_pause)
3361                        rmt_adv |= LPA_PAUSE_ASYM;
3362
3363                lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3364
3365                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3366                if (flowctrl & FLOW_CTRL_TX)
3367                        val |= MACCFG1_TX_FLOW;
3368                if (flowctrl & FLOW_CTRL_RX)
3369                        val |= MACCFG1_RX_FLOW;
3370        }
3371
3372        return val;
3373}
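
/* Example of the pause resolution above (a summary of the generic mii
 * helpers, not driver-specific code): if the local advertisement
 * includes symmetric pause and the link partner also reports pause
 * capability, mii_resolve_flowctrl_fdx() returns FLOW_CTRL_TX |
 * FLOW_CTRL_RX and both MACCFG1 flow-control bits are set; with only
 * asymmetric pause negotiated, just one direction is enabled.
 */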
3374
3375static noinline void gfar_update_link_state(struct gfar_private *priv)
3376{
3377        struct gfar __iomem *regs = priv->gfargrp[0].regs;
3378        struct phy_device *phydev = priv->phydev;
3379
3380        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3381                return;
3382
3383        if (phydev->link) {
3384                u32 tempval1 = gfar_read(&regs->maccfg1);
3385                u32 tempval = gfar_read(&regs->maccfg2);
3386                u32 ecntrl = gfar_read(&regs->ecntrl);
3387
3388                if (phydev->duplex != priv->oldduplex) {
3389                        if (!(phydev->duplex))
3390                                tempval &= ~(MACCFG2_FULL_DUPLEX);
3391                        else
3392                                tempval |= MACCFG2_FULL_DUPLEX;
3393
3394                        priv->oldduplex = phydev->duplex;
3395                }
3396
3397                if (phydev->speed != priv->oldspeed) {
3398                        switch (phydev->speed) {
3399                        case 1000:
3400                                tempval =
3401                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3402
3403                                ecntrl &= ~(ECNTRL_R100);
3404                                break;
3405                        case 100:
3406                        case 10:
3407                                tempval =
3408                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3409
3410                                /* Reduced mode distinguishes
3411                                 * between 10 and 100
3412                                 */
3413                                if (phydev->speed == SPEED_100)
3414                                        ecntrl |= ECNTRL_R100;
3415                                else
3416                                        ecntrl &= ~(ECNTRL_R100);
3417                                break;
3418                        default:
3419                                netif_warn(priv, link, priv->ndev,
3420                                           "Ack!  Speed (%d) is not 10/100/1000!\n",
3421                                           phydev->speed);
3422                                break;
3423                        }
3424
3425                        priv->oldspeed = phydev->speed;
3426                }
3427
3428                tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3429                tempval1 |= gfar_get_flowctrl_cfg(priv);
3430
3431                gfar_write(&regs->maccfg1, tempval1);
3432                gfar_write(&regs->maccfg2, tempval);
3433                gfar_write(&regs->ecntrl, ecntrl);
3434
3435                if (!priv->oldlink)
3436                        priv->oldlink = 1;
3437
3438        } else if (priv->oldlink) {
3439                priv->oldlink = 0;
3440                priv->oldspeed = 0;
3441                priv->oldduplex = -1;
3442        }
3443
3444        if (netif_msg_link(priv))
3445                phy_print_status(phydev);
3446}
3447
3448static struct of_device_id gfar_match[] = {
3450        {
3451                .type = "network",
3452                .compatible = "gianfar",
3453        },
3454        {
3455                .compatible = "fsl,etsec2",
3456        },
3457        {},
3458};
3459MODULE_DEVICE_TABLE(of, gfar_match);
3460
3461/* Structure for a device driver */
3462static struct platform_driver gfar_driver = {
3463        .driver = {
3464                .name = "fsl-gianfar",
3465                .owner = THIS_MODULE,
3466                .pm = GFAR_PM_OPS,
3467                .of_match_table = gfar_match,
3468        },
3469        .probe = gfar_probe,
3470        .remove = gfar_remove,
3471};
3472
3473module_platform_driver(gfar_driver);
3474