linux/drivers/net/sungem.c
   1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
   2 * sungem.c: Sun GEM ethernet driver.
   3 *
   4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
   5 *
   6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
   7 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
   8 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
   9 *
  10 * NAPI and NETPOLL support
  11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
  12 *
  13 * TODO:
   14 *  - Now that the driver has been significantly simplified, I need to rework
   15 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
   16 *    can avoid holding most of them for such long periods of time (and schedule
  17 *    instead). The main issues at this point are caused by the netdev layer
  18 *    though:
  19 *
   20 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
   21 *    held by net/core/dev.c, so they can't schedule. That means they can't
   22 *    call napi_disable() either, which forces gem_poll() to keep a spinlock
   23 *    where it could otherwise have been dropped. change_mtu in particular
   24 *    would also love to msleep instead of using horrid locked delays when
   25 *    resetting the HW, but that read_lock() makes it impossible, unless I
   26 *    defer its action to the reset task, which means it'll be asynchronous
   27 *    (it won't take effect until the system schedules a bit).
  28 *
   29 *    Also, it would probably be possible to remove most of the long-lived
   30 *    locking in the open/resume code path (gem_reinit_chip) by being more
   31 *    careful about when we can start taking interrupts or get xmit() called...
  32 */
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/types.h>
  37#include <linux/fcntl.h>
  38#include <linux/interrupt.h>
  39#include <linux/ioport.h>
  40#include <linux/in.h>
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43#include <linux/string.h>
  44#include <linux/delay.h>
  45#include <linux/init.h>
  46#include <linux/errno.h>
  47#include <linux/pci.h>
  48#include <linux/dma-mapping.h>
  49#include <linux/netdevice.h>
  50#include <linux/etherdevice.h>
  51#include <linux/skbuff.h>
  52#include <linux/mii.h>
  53#include <linux/ethtool.h>
  54#include <linux/crc32.h>
  55#include <linux/random.h>
  56#include <linux/workqueue.h>
  57#include <linux/if_vlan.h>
  58#include <linux/bitops.h>
  59#include <linux/mutex.h>
  60#include <linux/mm.h>
  61
  62#include <asm/system.h>
  63#include <asm/io.h>
  64#include <asm/byteorder.h>
  65#include <asm/uaccess.h>
  66#include <asm/irq.h>
  67
  68#ifdef CONFIG_SPARC
  69#include <asm/idprom.h>
  70#include <asm/prom.h>
  71#endif
  72
  73#ifdef CONFIG_PPC_PMAC
  74#include <asm/pci-bridge.h>
  75#include <asm/prom.h>
  76#include <asm/machdep.h>
  77#include <asm/pmac_feature.h>
  78#endif
  79
  80#include "sungem_phy.h"
  81#include "sungem.h"
  82
  83/* Stripping FCS is causing problems, disabled for now */
  84#undef STRIP_FCS
  85
  86#define DEFAULT_MSG     (NETIF_MSG_DRV          | \
  87                         NETIF_MSG_PROBE        | \
  88                         NETIF_MSG_LINK)
  89
  90#define ADVERTISE_MASK  (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
  91                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
  92                         SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
  93                         SUPPORTED_Pause | SUPPORTED_Autoneg)
  94
  95#define DRV_NAME        "sungem"
  96#define DRV_VERSION     "0.98"
  97#define DRV_RELDATE     "8/24/03"
  98#define DRV_AUTHOR      "David S. Miller (davem@redhat.com)"
  99
 100static char version[] __devinitdata =
 101        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
 102
 103MODULE_AUTHOR(DRV_AUTHOR);
 104MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
 105MODULE_LICENSE("GPL");
 106
 107#define GEM_MODULE_NAME "gem"
 108#define PFX GEM_MODULE_NAME ": "
 109
 110static struct pci_device_id gem_pci_tbl[] = {
 111        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
 112          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 113
 114        /* These models only differ from the original GEM in
 115         * that their tx/rx fifos are of a different size and
 116         * they only support 10/100 speeds. -DaveM
 117         *
 118         * Apple's GMAC does support gigabit on machines with
 119         * the BCM54xx PHYs. -BenH
 120         */
 121        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
 122          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 123        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
 124          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 125        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
 126          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 127        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
 128          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 129        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
 130          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 131        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
 132          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 133        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
 134          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 135        {0, }
 136};
 137
 138MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
 139
 140static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
 141{
 142        u32 cmd;
 143        int limit = 10000;
 144
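            /* Build an IEEE 802.3 clause 22 MIF read frame: start bits (01),
             * read opcode (10), PHY address, register address and the
             * turnaround MSB, then poll the turnaround LSB, which the MIF
             * sets once the transaction completes and the read data is
             * valid in the low 16 bits.
             */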
 145        cmd  = (1 << 30);
 146        cmd |= (2 << 28);
 147        cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
 148        cmd |= (reg << 18) & MIF_FRAME_REGAD;
 149        cmd |= (MIF_FRAME_TAMSB);
 150        writel(cmd, gp->regs + MIF_FRAME);
 151
 152        while (--limit) {
 153                cmd = readl(gp->regs + MIF_FRAME);
 154                if (cmd & MIF_FRAME_TALSB)
 155                        break;
 156
 157                udelay(10);
 158        }
 159
 160        if (!limit)
 161                cmd = 0xffff;
 162
 163        return cmd & MIF_FRAME_DATA;
 164}
 165
 166static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
 167{
 168        struct gem *gp = netdev_priv(dev);
 169        return __phy_read(gp, mii_id, reg);
 170}
 171
 172static inline u16 phy_read(struct gem *gp, int reg)
 173{
 174        return __phy_read(gp, gp->mii_phy_addr, reg);
 175}
 176
 177static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
 178{
 179        u32 cmd;
 180        int limit = 10000;
 181
 182        cmd  = (1 << 30);
 183        cmd |= (1 << 28);
 184        cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
 185        cmd |= (reg << 18) & MIF_FRAME_REGAD;
 186        cmd |= (MIF_FRAME_TAMSB);
 187        cmd |= (val & MIF_FRAME_DATA);
 188        writel(cmd, gp->regs + MIF_FRAME);
 189
 190        while (limit--) {
 191                cmd = readl(gp->regs + MIF_FRAME);
 192                if (cmd & MIF_FRAME_TALSB)
 193                        break;
 194
 195                udelay(10);
 196        }
 197}
 198
 199static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
 200{
 201        struct gem *gp = netdev_priv(dev);
 202        __phy_write(gp, mii_id, reg, val & 0xffff);
 203}
 204
 205static inline void phy_write(struct gem *gp, int reg, u16 val)
 206{
 207        __phy_write(gp, gp->mii_phy_addr, reg, val);
 208}
 209
 210static inline void gem_enable_ints(struct gem *gp)
 211{
 212        /* Enable all interrupts but TXDONE */
 213        writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
 214}
 215
 216static inline void gem_disable_ints(struct gem *gp)
 217{
 218        /* Disable all interrupts, including TXDONE */
 219        writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
 220}
 221
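    /* Turn on the chip's clock (refcounted). On PowerMac, the GMAC cell
     * has to be enabled through the platform feature call before the
     * chip's registers can be accessed; gem_put_cell() is the matching
     * release.
     */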
 222static void gem_get_cell(struct gem *gp)
 223{
 224        BUG_ON(gp->cell_enabled < 0);
 225        gp->cell_enabled++;
 226#ifdef CONFIG_PPC_PMAC
 227        if (gp->cell_enabled == 1) {
 228                mb();
 229                pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
 230                udelay(10);
 231        }
 232#endif /* CONFIG_PPC_PMAC */
 233}
 234
 235/* Turn off the chip's clock */
 236static void gem_put_cell(struct gem *gp)
 237{
 238        BUG_ON(gp->cell_enabled <= 0);
 239        gp->cell_enabled--;
 240#ifdef CONFIG_PPC_PMAC
 241        if (gp->cell_enabled == 0) {
 242                mb();
 243                pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
 244                udelay(10);
 245        }
 246#endif /* CONFIG_PPC_PMAC */
 247}
 248
 249static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
 250{
 251        if (netif_msg_intr(gp))
 252                printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
 253}
 254
 255static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 256{
 257        u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
 258        u32 pcs_miistat;
 259
 260        if (netif_msg_intr(gp))
 261                printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
 262                        gp->dev->name, pcs_istat);
 263
 264        if (!(pcs_istat & PCS_ISTAT_LSC)) {
 265                printk(KERN_ERR "%s: PCS irq but no link status change???\n",
 266                       dev->name);
 267                return 0;
 268        }
 269
 270        /* The link status bit latches on zero, so you must
 271         * read it twice in such a case to see a transition
 272         * to the link being up.
 273         */
 274        pcs_miistat = readl(gp->regs + PCS_MIISTAT);
 275        if (!(pcs_miistat & PCS_MIISTAT_LS))
 276                pcs_miistat |=
 277                        (readl(gp->regs + PCS_MIISTAT) &
 278                         PCS_MIISTAT_LS);
 279
 280        if (pcs_miistat & PCS_MIISTAT_ANC) {
 281                /* The remote-fault indication is only valid
 282                 * when autoneg has completed.
 283                 */
 284                if (pcs_miistat & PCS_MIISTAT_RF)
 285                        printk(KERN_INFO "%s: PCS AutoNEG complete, "
 286                               "RemoteFault\n", dev->name);
 287                else
 288                        printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
 289                               dev->name);
 290        }
 291
 292        if (pcs_miistat & PCS_MIISTAT_LS) {
 293                printk(KERN_INFO "%s: PCS link is now up.\n",
 294                       dev->name);
 295                netif_carrier_on(gp->dev);
 296        } else {
 297                printk(KERN_INFO "%s: PCS link is now down.\n",
 298                       dev->name);
 299                netif_carrier_off(gp->dev);
 300                /* If this happens and the link timer is not running,
 301                 * reset so we re-negotiate.
 302                 */
 303                if (!timer_pending(&gp->link_timer))
 304                        return 1;
 305        }
 306
 307        return 0;
 308}
 309
 310static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 311{
 312        u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
 313
 314        if (netif_msg_intr(gp))
 315                printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
 316                        gp->dev->name, txmac_stat);
 317
  318        /* Deferred timer expiration is quite normal;
 319         * don't even log the event.
 320         */
 321        if ((txmac_stat & MAC_TXSTAT_DTE) &&
 322            !(txmac_stat & ~MAC_TXSTAT_DTE))
 323                return 0;
 324
 325        if (txmac_stat & MAC_TXSTAT_URUN) {
 326                printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
 327                       dev->name);
 328                gp->net_stats.tx_fifo_errors++;
 329        }
 330
 331        if (txmac_stat & MAC_TXSTAT_MPE) {
 332                printk(KERN_ERR "%s: TX MAC max packet size error.\n",
 333                       dev->name);
 334                gp->net_stats.tx_errors++;
 335        }
 336
 337        /* The rest are all cases of one of the 16-bit TX
 338         * counters expiring.
 339         */
 340        if (txmac_stat & MAC_TXSTAT_NCE)
 341                gp->net_stats.collisions += 0x10000;
 342
 343        if (txmac_stat & MAC_TXSTAT_ECE) {
 344                gp->net_stats.tx_aborted_errors += 0x10000;
 345                gp->net_stats.collisions += 0x10000;
 346        }
 347
 348        if (txmac_stat & MAC_TXSTAT_LCE) {
 349                gp->net_stats.tx_aborted_errors += 0x10000;
 350                gp->net_stats.collisions += 0x10000;
 351        }
 352
 353        /* We do not keep track of MAC_TXSTAT_FCE and
 354         * MAC_TXSTAT_PCE events.
 355         */
 356        return 0;
 357}
 358
  359/* When we get an RX fifo overflow, the RX unit in GEM is probably hung
 360 * so we do the following.
 361 *
 362 * If any part of the reset goes wrong, we return 1 and that causes the
 363 * whole chip to be reset.
 364 */
 365static int gem_rxmac_reset(struct gem *gp)
 366{
 367        struct net_device *dev = gp->dev;
 368        int limit, i;
 369        u64 desc_dma;
 370        u32 val;
 371
 372        /* First, reset & disable MAC RX. */
 373        writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
 374        for (limit = 0; limit < 5000; limit++) {
 375                if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
 376                        break;
 377                udelay(10);
 378        }
 379        if (limit == 5000) {
 380                printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
 381                       "chip.\n", dev->name);
 382                return 1;
 383        }
 384
 385        writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
 386               gp->regs + MAC_RXCFG);
 387        for (limit = 0; limit < 5000; limit++) {
 388                if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
 389                        break;
 390                udelay(10);
 391        }
 392        if (limit == 5000) {
 393                printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
 394                       "chip.\n", dev->name);
 395                return 1;
 396        }
 397
 398        /* Second, disable RX DMA. */
 399        writel(0, gp->regs + RXDMA_CFG);
 400        for (limit = 0; limit < 5000; limit++) {
 401                if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
 402                        break;
 403                udelay(10);
 404        }
 405        if (limit == 5000) {
 406                printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
 407                       "chip.\n", dev->name);
 408                return 1;
 409        }
 410
  411        mdelay(5);
 412
 413        /* Execute RX reset command. */
 414        writel(gp->swrst_base | GREG_SWRST_RXRST,
 415               gp->regs + GREG_SWRST);
 416        for (limit = 0; limit < 5000; limit++) {
 417                if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
 418                        break;
 419                udelay(10);
 420        }
 421        if (limit == 5000) {
 422                printk(KERN_ERR "%s: RX reset command will not execute, resetting "
 423                       "whole chip.\n", dev->name);
 424                return 1;
 425        }
 426
 427        /* Refresh the RX ring. */
 428        for (i = 0; i < RX_RING_SIZE; i++) {
 429                struct gem_rxd *rxd = &gp->init_block->rxd[i];
 430
 431                if (gp->rx_skbs[i] == NULL) {
 432                        printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
 433                               "whole chip.\n", dev->name);
 434                        return 1;
 435                }
 436
 437                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 438        }
 439        gp->rx_new = gp->rx_old = 0;
 440
 441        /* Now we must reprogram the rest of RX unit. */
 442        desc_dma = (u64) gp->gblock_dvma;
 443        desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
 444        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
 445        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
 446        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
 447        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
 448               ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
 449        writel(val, gp->regs + RXDMA_CFG);
 450        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
 451                writel(((5 & RXDMA_BLANK_IPKTS) |
 452                        ((8 << 12) & RXDMA_BLANK_ITIME)),
 453                       gp->regs + RXDMA_BLANK);
 454        else
 455                writel(((5 & RXDMA_BLANK_IPKTS) |
 456                        ((4 << 12) & RXDMA_BLANK_ITIME)),
 457                       gp->regs + RXDMA_BLANK);
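            /* The pause thresholds are programmed in units of 64 bytes of
             * RX FIFO occupancy; the two fields set the fill levels at which
             * pause flow control is asserted and released.
             */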
 458        val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
 459        val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
 460        writel(val, gp->regs + RXDMA_PTHRESH);
 461        val = readl(gp->regs + RXDMA_CFG);
 462        writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
 463        writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
 464        val = readl(gp->regs + MAC_RXCFG);
 465        writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
 466
 467        return 0;
 468}
 469
 470static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 471{
 472        u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
 473        int ret = 0;
 474
 475        if (netif_msg_intr(gp))
 476                printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
 477                        gp->dev->name, rxmac_stat);
 478
 479        if (rxmac_stat & MAC_RXSTAT_OFLW) {
 480                u32 smac = readl(gp->regs + MAC_SMACHINE);
 481
 482                printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
 483                                dev->name, smac);
 484                gp->net_stats.rx_over_errors++;
 485                gp->net_stats.rx_fifo_errors++;
 486
 487                ret = gem_rxmac_reset(gp);
 488        }
 489
 490        if (rxmac_stat & MAC_RXSTAT_ACE)
 491                gp->net_stats.rx_frame_errors += 0x10000;
 492
 493        if (rxmac_stat & MAC_RXSTAT_CCE)
 494                gp->net_stats.rx_crc_errors += 0x10000;
 495
 496        if (rxmac_stat & MAC_RXSTAT_LCE)
 497                gp->net_stats.rx_length_errors += 0x10000;
 498
 499        /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
 500         * events.
 501         */
 502        return ret;
 503}
 504
 505static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 506{
 507        u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
 508
 509        if (netif_msg_intr(gp))
 510                printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
 511                        gp->dev->name, mac_cstat);
 512
 513        /* This interrupt is just for pause frame and pause
  514         * tracking.  It is useful for diagnostics and debugging,
  515         * but by default we will probably mask these events.
 516         */
 517        if (mac_cstat & MAC_CSTAT_PS)
 518                gp->pause_entered++;
 519
 520        if (mac_cstat & MAC_CSTAT_PRCV)
 521                gp->pause_last_time_recvd = (mac_cstat >> 16);
 522
 523        return 0;
 524}
 525
 526static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 527{
 528        u32 mif_status = readl(gp->regs + MIF_STATUS);
 529        u32 reg_val, changed_bits;
 530
 531        reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
 532        changed_bits = (mif_status & MIF_STATUS_STAT);
 533
 534        gem_handle_mif_event(gp, reg_val, changed_bits);
 535
 536        return 0;
 537}
 538
 539static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
 540{
 541        u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
 542
 543        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
 544            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
 545                printk(KERN_ERR "%s: PCI error [%04x] ",
 546                       dev->name, pci_estat);
 547
 548                if (pci_estat & GREG_PCIESTAT_BADACK)
 549                        printk("<No ACK64# during ABS64 cycle> ");
 550                if (pci_estat & GREG_PCIESTAT_DTRTO)
 551                        printk("<Delayed transaction timeout> ");
 552                if (pci_estat & GREG_PCIESTAT_OTHER)
 553                        printk("<other>");
 554                printk("\n");
 555        } else {
 556                pci_estat |= GREG_PCIESTAT_OTHER;
 557                printk(KERN_ERR "%s: PCI error\n", dev->name);
 558        }
 559
 560        if (pci_estat & GREG_PCIESTAT_OTHER) {
 561                u16 pci_cfg_stat;
 562
 563                /* Interrogate PCI config space for the
 564                 * true cause.
 565                 */
 566                pci_read_config_word(gp->pdev, PCI_STATUS,
 567                                     &pci_cfg_stat);
 568                printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
 569                       dev->name, pci_cfg_stat);
 570                if (pci_cfg_stat & PCI_STATUS_PARITY)
 571                        printk(KERN_ERR "%s: PCI parity error detected.\n",
 572                               dev->name);
 573                if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
 574                        printk(KERN_ERR "%s: PCI target abort.\n",
 575                               dev->name);
 576                if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
 577                        printk(KERN_ERR "%s: PCI master acks target abort.\n",
 578                               dev->name);
 579                if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
 580                        printk(KERN_ERR "%s: PCI master abort.\n",
 581                               dev->name);
 582                if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
 583                        printk(KERN_ERR "%s: PCI system error SERR#.\n",
 584                               dev->name);
 585                if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
 586                        printk(KERN_ERR "%s: PCI parity error.\n",
 587                               dev->name);
 588
 589                /* Write the error bits back to clear them. */
 590                pci_cfg_stat &= (PCI_STATUS_PARITY |
 591                                 PCI_STATUS_SIG_TARGET_ABORT |
 592                                 PCI_STATUS_REC_TARGET_ABORT |
 593                                 PCI_STATUS_REC_MASTER_ABORT |
 594                                 PCI_STATUS_SIG_SYSTEM_ERROR |
 595                                 PCI_STATUS_DETECTED_PARITY);
 596                pci_write_config_word(gp->pdev,
 597                                      PCI_STATUS, pci_cfg_stat);
 598        }
 599
 600        /* For all PCI errors, we should reset the chip. */
 601        return 1;
 602}
 603
 604/* All non-normal interrupt conditions get serviced here.
 605 * Returns non-zero if we should just exit the interrupt
  606 * handler right now (i.e. if we reset the card, which invalidates
 607 * all of the other original irq status bits).
 608 */
 609static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
 610{
 611        if (gem_status & GREG_STAT_RXNOBUF) {
 612                /* Frame arrived, no free RX buffers available. */
 613                if (netif_msg_rx_err(gp))
 614                        printk(KERN_DEBUG "%s: no buffer for rx frame\n",
 615                                gp->dev->name);
 616                gp->net_stats.rx_dropped++;
 617        }
 618
 619        if (gem_status & GREG_STAT_RXTAGERR) {
 620                /* corrupt RX tag framing */
 621                if (netif_msg_rx_err(gp))
 622                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
 623                                gp->dev->name);
 624                gp->net_stats.rx_errors++;
 625
 626                goto do_reset;
 627        }
 628
 629        if (gem_status & GREG_STAT_PCS) {
 630                if (gem_pcs_interrupt(dev, gp, gem_status))
 631                        goto do_reset;
 632        }
 633
 634        if (gem_status & GREG_STAT_TXMAC) {
 635                if (gem_txmac_interrupt(dev, gp, gem_status))
 636                        goto do_reset;
 637        }
 638
 639        if (gem_status & GREG_STAT_RXMAC) {
 640                if (gem_rxmac_interrupt(dev, gp, gem_status))
 641                        goto do_reset;
 642        }
 643
 644        if (gem_status & GREG_STAT_MAC) {
 645                if (gem_mac_interrupt(dev, gp, gem_status))
 646                        goto do_reset;
 647        }
 648
 649        if (gem_status & GREG_STAT_MIF) {
 650                if (gem_mif_interrupt(dev, gp, gem_status))
 651                        goto do_reset;
 652        }
 653
 654        if (gem_status & GREG_STAT_PCIERR) {
 655                if (gem_pci_interrupt(dev, gp, gem_status))
 656                        goto do_reset;
 657        }
 658
 659        return 0;
 660
 661do_reset:
 662        gp->reset_task_pending = 1;
 663        schedule_work(&gp->reset_task);
 664
 665        return 1;
 666}
 667
 668static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
 669{
 670        int entry, limit;
 671
 672        if (netif_msg_intr(gp))
 673                printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
 674                        gp->dev->name, gem_status);
 675
 676        entry = gp->tx_old;
 677        limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
 678        while (entry != limit) {
 679                struct sk_buff *skb;
 680                struct gem_txd *txd;
 681                dma_addr_t dma_addr;
 682                u32 dma_len;
 683                int frag;
 684
 685                if (netif_msg_tx_done(gp))
 686                        printk(KERN_DEBUG "%s: tx done, slot %d\n",
 687                                gp->dev->name, entry);
 688                skb = gp->tx_skbs[entry];
 689                if (skb_shinfo(skb)->nr_frags) {
 690                        int last = entry + skb_shinfo(skb)->nr_frags;
 691                        int walk = entry;
 692                        int incomplete = 0;
 693
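                            /* Walk from this descriptor to the skb's last
                             * fragment; if 'limit' (the chip's completion
                             * point) is reached first, the packet has not
                             * been fully transmitted yet, so stop here.
                             */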
 694                        last &= (TX_RING_SIZE - 1);
 695                        for (;;) {
 696                                walk = NEXT_TX(walk);
 697                                if (walk == limit)
 698                                        incomplete = 1;
 699                                if (walk == last)
 700                                        break;
 701                        }
 702                        if (incomplete)
 703                                break;
 704                }
 705                gp->tx_skbs[entry] = NULL;
 706                gp->net_stats.tx_bytes += skb->len;
 707
 708                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
 709                        txd = &gp->init_block->txd[entry];
 710
 711                        dma_addr = le64_to_cpu(txd->buffer);
 712                        dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
 713
 714                        pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
 715                        entry = NEXT_TX(entry);
 716                }
 717
 718                gp->net_stats.tx_packets++;
 719                dev_kfree_skb_irq(skb);
 720        }
 721        gp->tx_old = entry;
 722
 723        if (netif_queue_stopped(dev) &&
 724            TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
 725                netif_wake_queue(dev);
 726}
 727
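    /* Return RX descriptors up to 'limit' to the chip. Ownership is handed
     * back in aligned groups of four, and the RX kick register is written
     * only once at the end.
     */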
 728static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 729{
 730        int cluster_start, curr, count, kick;
 731
 732        cluster_start = curr = (gp->rx_new & ~(4 - 1));
 733        count = 0;
 734        kick = -1;
 735        wmb();
 736        while (curr != limit) {
 737                curr = NEXT_RX(curr);
 738                if (++count == 4) {
 739                        struct gem_rxd *rxd =
 740                                &gp->init_block->rxd[cluster_start];
 741                        for (;;) {
 742                                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 743                                rxd++;
 744                                cluster_start = NEXT_RX(cluster_start);
 745                                if (cluster_start == curr)
 746                                        break;
 747                        }
 748                        kick = curr;
 749                        count = 0;
 750                }
 751        }
 752        if (kick >= 0) {
 753                mb();
 754                writel(kick, gp->regs + RXDMA_KICK);
 755        }
 756}
 757
 758static int gem_rx(struct gem *gp, int work_to_do)
 759{
 760        int entry, drops, work_done = 0;
 761        u32 done;
 762        __sum16 csum;
 763
 764        if (netif_msg_rx_status(gp))
 765                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
 766                        gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
 767
 768        entry = gp->rx_new;
 769        drops = 0;
 770        done = readl(gp->regs + RXDMA_DONE);
 771        for (;;) {
 772                struct gem_rxd *rxd = &gp->init_block->rxd[entry];
 773                struct sk_buff *skb;
 774                u64 status = le64_to_cpu(rxd->status_word);
 775                dma_addr_t dma_addr;
 776                int len;
 777
 778                if ((status & RXDCTRL_OWN) != 0)
 779                        break;
 780
 781                if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
 782                        break;
 783
  784                /* When writing back an RX descriptor, GEM writes the status
  785                 * then the buffer address, possibly in separate transactions.
 786                 * If we don't wait for the chip to write both, we could
 787                 * post a new buffer to this descriptor then have GEM spam
 788                 * on the buffer address.  We sync on the RX completion
 789                 * register to prevent this from happening.
 790                 */
 791                if (entry == done) {
 792                        done = readl(gp->regs + RXDMA_DONE);
 793                        if (entry == done)
 794                                break;
 795                }
 796
 797                /* We can now account for the work we're about to do */
 798                work_done++;
 799
 800                skb = gp->rx_skbs[entry];
 801
 802                len = (status & RXDCTRL_BUFSZ) >> 16;
 803                if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
 804                        gp->net_stats.rx_errors++;
 805                        if (len < ETH_ZLEN)
 806                                gp->net_stats.rx_length_errors++;
  807                        if (status & RXDCTRL_BAD)
 808                                gp->net_stats.rx_crc_errors++;
 809
 810                        /* We'll just return it to GEM. */
 811                drop_it:
 812                        gp->net_stats.rx_dropped++;
 813                        goto next;
 814                }
 815
 816                dma_addr = le64_to_cpu(rxd->buffer);
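                    /* Two receive strategies: for large packets, hand the
                     * ring buffer itself to the stack and map a freshly
                     * allocated replacement; for small ones, copy into a
                     * new skb and let the chip reuse the original buffer.
                     */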
 817                if (len > RX_COPY_THRESHOLD) {
 818                        struct sk_buff *new_skb;
 819
 820                        new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
 821                        if (new_skb == NULL) {
 822                                drops++;
 823                                goto drop_it;
 824                        }
 825                        pci_unmap_page(gp->pdev, dma_addr,
 826                                       RX_BUF_ALLOC_SIZE(gp),
 827                                       PCI_DMA_FROMDEVICE);
 828                        gp->rx_skbs[entry] = new_skb;
 829                        new_skb->dev = gp->dev;
 830                        skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
 831                        rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
 832                                                               virt_to_page(new_skb->data),
 833                                                               offset_in_page(new_skb->data),
 834                                                               RX_BUF_ALLOC_SIZE(gp),
 835                                                               PCI_DMA_FROMDEVICE));
 836                        skb_reserve(new_skb, RX_OFFSET);
 837
 838                        /* Trim the original skb for the netif. */
 839                        skb_trim(skb, len);
 840                } else {
 841                        struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
 842
 843                        if (copy_skb == NULL) {
 844                                drops++;
 845                                goto drop_it;
 846                        }
 847
 848                        skb_reserve(copy_skb, 2);
 849                        skb_put(copy_skb, len);
 850                        pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 851                        skb_copy_from_linear_data(skb, copy_skb->data, len);
 852                        pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 853
 854                        /* We'll reuse the original ring buffer. */
 855                        skb = copy_skb;
 856                }
 857
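                    /* The chip leaves a 16-bit checksum over the received
                     * data in the descriptor; convert it and pass it up as
                     * CHECKSUM_COMPLETE so the stack need not recompute it.
                     */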
 858                csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
 859                skb->csum = csum_unfold(csum);
 860                skb->ip_summed = CHECKSUM_COMPLETE;
 861                skb->protocol = eth_type_trans(skb, gp->dev);
 862
 863                netif_receive_skb(skb);
 864
 865                gp->net_stats.rx_packets++;
 866                gp->net_stats.rx_bytes += len;
 867
 868        next:
 869                entry = NEXT_RX(entry);
 870        }
 871
 872        gem_post_rxds(gp, entry);
 873
 874        gp->rx_new = entry;
 875
 876        if (drops)
 877                printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
 878                       gp->dev->name);
 879
 880        return work_done;
 881}
 882
 883static int gem_poll(struct napi_struct *napi, int budget)
 884{
 885        struct gem *gp = container_of(napi, struct gem, napi);
 886        struct net_device *dev = gp->dev;
 887        unsigned long flags;
 888        int work_done;
 889
 890        /*
 891         * NAPI locking nightmare: See comment at head of driver
 892         */
 893        spin_lock_irqsave(&gp->lock, flags);
 894
 895        work_done = 0;
 896        do {
 897                /* Handle anomalies */
 898                if (gp->status & GREG_STAT_ABNORMAL) {
 899                        if (gem_abnormal_irq(dev, gp, gp->status))
 900                                break;
 901                }
 902
 903                /* Run TX completion thread */
 904                spin_lock(&gp->tx_lock);
 905                gem_tx(dev, gp, gp->status);
 906                spin_unlock(&gp->tx_lock);
 907
 908                spin_unlock_irqrestore(&gp->lock, flags);
 909
 910                /* Run RX thread. We don't use any locking here,
 911                 * code willing to do bad things - like cleaning the
 912                 * rx ring - must call napi_disable(), which
 913                 * schedule_timeout()'s if polling is already disabled.
 914                 */
 915                work_done += gem_rx(gp, budget - work_done);
 916
 917                if (work_done >= budget)
 918                        return work_done;
 919
 920                spin_lock_irqsave(&gp->lock, flags);
 921
 922                gp->status = readl(gp->regs + GREG_STAT);
 923        } while (gp->status & GREG_STAT_NAPI);
 924
 925        __napi_complete(napi);
 926        gem_enable_ints(gp);
 927
 928        spin_unlock_irqrestore(&gp->lock, flags);
 929
 930        return work_done;
 931}
 932
 933static irqreturn_t gem_interrupt(int irq, void *dev_id)
 934{
 935        struct net_device *dev = dev_id;
 936        struct gem *gp = netdev_priv(dev);
 937        unsigned long flags;
 938
 939        /* Swallow interrupts when shutting the chip down, though
  940         * that shouldn't happen: we should have done free_irq() by
 941         * this point...
 942         */
 943        if (!gp->running)
 944                return IRQ_HANDLED;
 945
 946        spin_lock_irqsave(&gp->lock, flags);
 947
 948        if (napi_schedule_prep(&gp->napi)) {
 949                u32 gem_status = readl(gp->regs + GREG_STAT);
 950
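                    /* A clear status most likely means a shared interrupt
                     * line that isn't ours: back out of the NAPI scheduling
                     * and report IRQ_NONE.
                     */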
 951                if (gem_status == 0) {
 952                        napi_enable(&gp->napi);
 953                        spin_unlock_irqrestore(&gp->lock, flags);
 954                        return IRQ_NONE;
 955                }
 956                gp->status = gem_status;
 957                gem_disable_ints(gp);
 958                __napi_schedule(&gp->napi);
 959        }
 960
 961        spin_unlock_irqrestore(&gp->lock, flags);
 962
 963        /* If polling was disabled at the time we received that
 964         * interrupt, we may return IRQ_HANDLED here while we
 965         * should return IRQ_NONE. No big deal...
 966         */
 967        return IRQ_HANDLED;
 968}
 969
 970#ifdef CONFIG_NET_POLL_CONTROLLER
 971static void gem_poll_controller(struct net_device *dev)
 972{
  973        /* gem_interrupt() is safe to re-enter, so there is
  974         * no need to disable_irq() here.
 975         */
 976        gem_interrupt(dev->irq, dev);
 977}
 978#endif
 979
 980static void gem_tx_timeout(struct net_device *dev)
 981{
 982        struct gem *gp = netdev_priv(dev);
 983
 984        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
 985        if (!gp->running) {
 986                printk("%s: hrm.. hw not running !\n", dev->name);
 987                return;
 988        }
 989        printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
 990               dev->name,
 991               readl(gp->regs + TXDMA_CFG),
 992               readl(gp->regs + MAC_TXSTAT),
 993               readl(gp->regs + MAC_TXCFG));
 994        printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
 995               dev->name,
 996               readl(gp->regs + RXDMA_CFG),
 997               readl(gp->regs + MAC_RXSTAT),
 998               readl(gp->regs + MAC_RXCFG));
 999
1000        spin_lock_irq(&gp->lock);
1001        spin_lock(&gp->tx_lock);
1002
1003        gp->reset_task_pending = 1;
1004        schedule_work(&gp->reset_task);
1005
1006        spin_unlock(&gp->tx_lock);
1007        spin_unlock_irq(&gp->lock);
1008}
1009
1010static __inline__ int gem_intme(int entry)
1011{
1012        /* Algorithm: IRQ every 1/2 of descriptors. */
1013        if (!(entry & ((TX_RING_SIZE>>1)-1)))
1014                return 1;
1015
1016        return 0;
1017}
1018
1019static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1020                                  struct net_device *dev)
1021{
1022        struct gem *gp = netdev_priv(dev);
1023        int entry;
1024        u64 ctrl;
1025        unsigned long flags;
1026
1027        ctrl = 0;
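            /* For CHECKSUM_PARTIAL, tell the chip where the checksum
             * computation starts and where the result has to be stored;
             * both offsets are encoded into the descriptor control word.
             */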
1028        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1029                const u64 csum_start_off = skb_transport_offset(skb);
1030                const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
1031
1032                ctrl = (TXDCTRL_CENAB |
1033                        (csum_start_off << 15) |
1034                        (csum_stuff_off << 21));
1035        }
1036
1037        local_irq_save(flags);
1038        if (!spin_trylock(&gp->tx_lock)) {
1039                /* Tell upper layer to requeue */
1040                local_irq_restore(flags);
1041                return NETDEV_TX_LOCKED;
1042        }
1043        /* We raced with gem_do_stop() */
1044        if (!gp->running) {
1045                spin_unlock_irqrestore(&gp->tx_lock, flags);
1046                return NETDEV_TX_BUSY;
1047        }
1048
1049        /* This is a hard error, log it. */
1050        if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1051                netif_stop_queue(dev);
1052                spin_unlock_irqrestore(&gp->tx_lock, flags);
1053                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
1054                       dev->name);
1055                return NETDEV_TX_BUSY;
1056        }
1057
1058        entry = gp->tx_new;
1059        gp->tx_skbs[entry] = skb;
1060
1061        if (skb_shinfo(skb)->nr_frags == 0) {
1062                struct gem_txd *txd = &gp->init_block->txd[entry];
1063                dma_addr_t mapping;
1064                u32 len;
1065
1066                len = skb->len;
1067                mapping = pci_map_page(gp->pdev,
1068                                       virt_to_page(skb->data),
1069                                       offset_in_page(skb->data),
1070                                       len, PCI_DMA_TODEVICE);
1071                ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
1072                if (gem_intme(entry))
1073                        ctrl |= TXDCTRL_INTME;
1074                txd->buffer = cpu_to_le64(mapping);
1075                wmb();
1076                txd->control_word = cpu_to_le64(ctrl);
1077                entry = NEXT_TX(entry);
1078        } else {
1079                struct gem_txd *txd;
1080                u32 first_len;
1081                u64 intme;
1082                dma_addr_t first_mapping;
1083                int frag, first_entry = entry;
1084
1085                intme = 0;
1086                if (gem_intme(entry))
1087                        intme |= TXDCTRL_INTME;
1088
1089                /* We must give this initial chunk to the device last.
1090                 * Otherwise we could race with the device.
1091                 */
1092                first_len = skb_headlen(skb);
1093                first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
1094                                             offset_in_page(skb->data),
1095                                             first_len, PCI_DMA_TODEVICE);
1096                entry = NEXT_TX(entry);
1097
1098                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1099                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1100                        u32 len;
1101                        dma_addr_t mapping;
1102                        u64 this_ctrl;
1103
1104                        len = this_frag->size;
1105                        mapping = pci_map_page(gp->pdev,
1106                                               this_frag->page,
1107                                               this_frag->page_offset,
1108                                               len, PCI_DMA_TODEVICE);
1109                        this_ctrl = ctrl;
1110                        if (frag == skb_shinfo(skb)->nr_frags - 1)
1111                                this_ctrl |= TXDCTRL_EOF;
1112
1113                        txd = &gp->init_block->txd[entry];
1114                        txd->buffer = cpu_to_le64(mapping);
1115                        wmb();
1116                        txd->control_word = cpu_to_le64(this_ctrl | len);
1117
1118                        if (gem_intme(entry))
1119                                intme |= TXDCTRL_INTME;
1120
1121                        entry = NEXT_TX(entry);
1122                }
1123                txd = &gp->init_block->txd[first_entry];
1124                txd->buffer = cpu_to_le64(first_mapping);
1125                wmb();
1126                txd->control_word =
1127                        cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
1128        }
1129
1130        gp->tx_new = entry;
1131        if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
1132                netif_stop_queue(dev);
1133
1134        if (netif_msg_tx_queued(gp))
1135                printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
1136                       dev->name, entry, skb->len);
1137        mb();
1138        writel(gp->tx_new, gp->regs + TXDMA_KICK);
1139        spin_unlock_irqrestore(&gp->tx_lock, flags);
1140
1141        dev->trans_start = jiffies;
1142
1143        return NETDEV_TX_OK;
1144}
1145
1146static void gem_pcs_reset(struct gem *gp)
1147{
1148        int limit;
1149        u32 val;
1150
1151        /* Reset PCS unit. */
1152        val = readl(gp->regs + PCS_MIICTRL);
1153        val |= PCS_MIICTRL_RST;
1154        writel(val, gp->regs + PCS_MIICTRL);
1155
1156        limit = 32;
1157        while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1158                udelay(100);
1159                if (limit-- <= 0)
1160                        break;
1161        }
1162        if (limit < 0)
1163                printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1164                       gp->dev->name);
1165}
1166
1167static void gem_pcs_reinit_adv(struct gem *gp)
1168{
1169        u32 val;
1170
1171        /* Make sure PCS is disabled while changing advertisement
1172         * configuration.
1173         */
1174        val = readl(gp->regs + PCS_CFG);
1175        val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1176        writel(val, gp->regs + PCS_CFG);
1177
 1178        /* Advertise all capabilities except asymmetric
1179         * pause.
1180         */
1181        val = readl(gp->regs + PCS_MIIADV);
1182        val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1183                PCS_MIIADV_SP | PCS_MIIADV_AP);
1184        writel(val, gp->regs + PCS_MIIADV);
1185
1186        /* Enable and restart auto-negotiation, disable wrapback/loopback,
1187         * and re-enable PCS.
1188         */
1189        val = readl(gp->regs + PCS_MIICTRL);
1190        val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1191        val &= ~PCS_MIICTRL_WB;
1192        writel(val, gp->regs + PCS_MIICTRL);
1193
1194        val = readl(gp->regs + PCS_CFG);
1195        val |= PCS_CFG_ENABLE;
1196        writel(val, gp->regs + PCS_CFG);
1197
1198        /* Make sure serialink loopback is off.  The meaning
1199         * of this bit is logically inverted based upon whether
1200         * you are in Serialink or SERDES mode.
1201         */
1202        val = readl(gp->regs + PCS_SCTRL);
1203        if (gp->phy_type == phy_serialink)
1204                val &= ~PCS_SCTRL_LOOP;
1205        else
1206                val |= PCS_SCTRL_LOOP;
1207        writel(val, gp->regs + PCS_SCTRL);
1208}
1209
1210#define STOP_TRIES 32
1211
1212/* Must be invoked under gp->lock and gp->tx_lock. */
1213static void gem_reset(struct gem *gp)
1214{
1215        int limit;
1216        u32 val;
1217
1218        /* Make sure we won't get any more interrupts */
1219        writel(0xffffffff, gp->regs + GREG_IMASK);
1220
1221        /* Reset the chip */
1222        writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
1223               gp->regs + GREG_SWRST);
1224
1225        limit = STOP_TRIES;
1226
1227        do {
1228                udelay(20);
1229                val = readl(gp->regs + GREG_SWRST);
1230                if (limit-- <= 0)
1231                        break;
1232        } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
1233
1234        if (limit < 0)
1235                printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1236
1237        if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
1238                gem_pcs_reinit_adv(gp);
1239}
1240
1241/* Must be invoked under gp->lock and gp->tx_lock. */
1242static void gem_start_dma(struct gem *gp)
1243{
1244        u32 val;
1245
1246        /* We are ready to rock, turn everything on. */
1247        val = readl(gp->regs + TXDMA_CFG);
1248        writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1249        val = readl(gp->regs + RXDMA_CFG);
1250        writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1251        val = readl(gp->regs + MAC_TXCFG);
1252        writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1253        val = readl(gp->regs + MAC_RXCFG);
1254        writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1255
1256        (void) readl(gp->regs + MAC_RXCFG);
1257        udelay(100);
1258
1259        gem_enable_ints(gp);
1260
1261        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1262}
1263
 1264/* Must be invoked under gp->lock and gp->tx_lock. DMA won't
 1265 * actually be stopped until about 4ms later, though ...
1266 */
1267static void gem_stop_dma(struct gem *gp)
1268{
1269        u32 val;
1270
1271        /* We are done rocking, turn everything off. */
1272        val = readl(gp->regs + TXDMA_CFG);
1273        writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1274        val = readl(gp->regs + RXDMA_CFG);
1275        writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1276        val = readl(gp->regs + MAC_TXCFG);
1277        writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1278        val = readl(gp->regs + MAC_RXCFG);
1279        writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1280
1281        (void) readl(gp->regs + MAC_RXCFG);
1282
1283        /* Need to wait a bit ... done by the caller */
1284}
1285
1286
1287/* Must be invoked under gp->lock and gp->tx_lock. */
1288// XXX dbl check what that function should do when called on PCS PHY
1289static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1290{
1291        u32 advertise, features;
1292        int autoneg;
1293        int speed;
1294        int duplex;
1295
1296        if (gp->phy_type != phy_mii_mdio0 &&
1297            gp->phy_type != phy_mii_mdio1)
1298                goto non_mii;
1299
1300        /* Setup advertise */
1301        if (found_mii_phy(gp))
1302                features = gp->phy_mii.def->features;
1303        else
1304                features = 0;
1305
1306        advertise = features & ADVERTISE_MASK;
1307        if (gp->phy_mii.advertising != 0)
1308                advertise &= gp->phy_mii.advertising;
1309
1310        autoneg = gp->want_autoneg;
1311        speed = gp->phy_mii.speed;
1312        duplex = gp->phy_mii.duplex;
1313
1314        /* Setup link parameters */
1315        if (!ep)
1316                goto start_aneg;
1317        if (ep->autoneg == AUTONEG_ENABLE) {
1318                advertise = ep->advertising;
1319                autoneg = 1;
1320        } else {
1321                autoneg = 0;
1322                speed = ep->speed;
1323                duplex = ep->duplex;
1324        }
1325
1326start_aneg:
1327        /* Sanitize settings based on PHY capabilities */
1328        if ((features & SUPPORTED_Autoneg) == 0)
1329                autoneg = 0;
1330        if (speed == SPEED_1000 &&
1331            !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
1332                speed = SPEED_100;
1333        if (speed == SPEED_100 &&
1334            !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
1335                speed = SPEED_10;
1336        if (duplex == DUPLEX_FULL &&
1337            !(features & (SUPPORTED_1000baseT_Full |
1338                          SUPPORTED_100baseT_Full |
1339                          SUPPORTED_10baseT_Full)))
1340                duplex = DUPLEX_HALF;
1341        if (speed == 0)
1342                speed = SPEED_10;
1343
 1344        /* If we are asleep, we don't try to actually set up the PHY; we
1345         * just store the settings
1346         */
1347        if (gp->asleep) {
1348                gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
1349                gp->phy_mii.speed = speed;
1350                gp->phy_mii.duplex = duplex;
1351                return;
1352        }
1353
1354        /* Configure PHY & start aneg */
1355        gp->want_autoneg = autoneg;
1356        if (autoneg) {
1357                if (found_mii_phy(gp))
1358                        gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
1359                gp->lstate = link_aneg;
1360        } else {
1361                if (found_mii_phy(gp))
1362                        gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
1363                gp->lstate = link_force_ok;
1364        }
1365
1366non_mii:
1367        gp->timer_ticks = 0;
1368        mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1369}
1370
1371/* A link-up condition has occurred, initialize and enable the
1372 * rest of the chip.
1373 *
1374 * Must be invoked under gp->lock and gp->tx_lock.
1375 */
1376static int gem_set_link_modes(struct gem *gp)
1377{
1378        u32 val;
1379        int full_duplex, speed, pause;
1380
1381        full_duplex = 0;
1382        speed = SPEED_10;
1383        pause = 0;
1384
1385        if (found_mii_phy(gp)) {
1386                if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
1387                        return 1;
1388                full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
1389                speed = gp->phy_mii.speed;
1390                pause = gp->phy_mii.pause;
1391        } else if (gp->phy_type == phy_serialink ||
1392                   gp->phy_type == phy_serdes) {
1393                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1394
1395                if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
1396                        full_duplex = 1;
1397                speed = SPEED_1000;
1398        }
1399
1400        if (netif_msg_link(gp))
1401                printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
1402                        gp->dev->name, speed, (full_duplex ? "full" : "half"));
1403
1404        if (!gp->running)
1405                return 0;
1406
1407        val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
1408        if (full_duplex) {
1409                val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
1410        } else {
1411                /* MAC_TXCFG_NBO must be zero. */
1412        }
1413        writel(val, gp->regs + MAC_TXCFG);
1414
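            /* Program the XIF: enable the MII output drivers, force the link
             * LED on, disable receiver echo (DISE) for half-duplex MII PHYs,
             * force the full-duplex LED on otherwise, and select the GMII
             * datapath at gigabit speed.
             */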
1415        val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
1416        if (!full_duplex &&
1417            (gp->phy_type == phy_mii_mdio0 ||
1418             gp->phy_type == phy_mii_mdio1)) {
1419                val |= MAC_XIFCFG_DISE;
1420        } else if (full_duplex) {
1421                val |= MAC_XIFCFG_FLED;
1422        }
1423
1424        if (speed == SPEED_1000)
1425                val |= (MAC_XIFCFG_GMII);
1426
1427        writel(val, gp->regs + MAC_XIFCFG);
1428
1429        /* If gigabit and half-duplex, enable carrier extension
1430         * mode.  Else, disable it.
1431         */
1432        if (speed == SPEED_1000 && !full_duplex) {
1433                val = readl(gp->regs + MAC_TXCFG);
1434                writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1435
1436                val = readl(gp->regs + MAC_RXCFG);
1437                writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1438        } else {
1439                val = readl(gp->regs + MAC_TXCFG);
1440                writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1441
1442                val = readl(gp->regs + MAC_RXCFG);
1443                writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1444        }
1445
1446        if (gp->phy_type == phy_serialink ||
1447            gp->phy_type == phy_serdes) {
1448                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1449
1450                if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
1451                        pause = 1;
1452        }
1453
1454        if (netif_msg_link(gp)) {
1455                if (pause) {
1456                        printk(KERN_INFO "%s: Pause is enabled "
1457                               "(rxfifo: %d off: %d on: %d)\n",
1458                               gp->dev->name,
1459                               gp->rx_fifo_sz,
1460                               gp->rx_pause_off,
1461                               gp->rx_pause_on);
1462                } else {
1463                        printk(KERN_INFO "%s: Pause is disabled\n",
1464                               gp->dev->name);
1465                }
1466        }
1467
1468        if (!full_duplex)
1469                writel(512, gp->regs + MAC_STIME);
1470        else
1471                writel(64, gp->regs + MAC_STIME);
1472        val = readl(gp->regs + MAC_MCCFG);
1473        if (pause)
1474                val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1475        else
1476                val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1477        writel(val, gp->regs + MAC_MCCFG);
1478
1479        gem_start_dma(gp);
1480
1481        return 0;
1482}
1483
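/* A rough sketch of the fallback handling in the state machine below:
 * after a failed autoneg we force 100baseT half-duplex (link_force_try),
 * then drop to forced 10baseT if that still doesn't come up.  If a forced
 * link does come up while autoneg was wanted, the timer moves to
 * link_force_ret, retries autoneg once, and falls back to the last forced
 * speed if it fails again.
 */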
1484/* Must be invoked under gp->lock and gp->tx_lock. */
1485static int gem_mdio_link_not_up(struct gem *gp)
1486{
1487        switch (gp->lstate) {
1488        case link_force_ret:
1489                if (netif_msg_link(gp))
1490                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
1491                                " forced mode\n", gp->dev->name);
1492                gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
1493                        gp->last_forced_speed, DUPLEX_HALF);
1494                gp->timer_ticks = 5;
1495                gp->lstate = link_force_ok;
1496                return 0;
1497        case link_aneg:
1498                /* We try forced modes after a failed aneg only on PHYs that don't
1499                 * have the "magic_aneg" bit set. PHYs that do have it handle the
1500                 * whole forced-mode fallback internally, so we just restart aneg.
1501                 */
1502                if (gp->phy_mii.def->magic_aneg)
1503                        return 1;
1504                if (netif_msg_link(gp))
1505                        printk(KERN_INFO "%s: switching to forced 100bt\n",
1506                                gp->dev->name);
1507                /* Try forced modes. */
1508                gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
1509                        DUPLEX_HALF);
1510                gp->timer_ticks = 5;
1511                gp->lstate = link_force_try;
1512                return 0;
1513        case link_force_try:
1514                /* Downgrade from 100 to 10 Mbps if necessary.
1515                 * If already at 10Mbps, warn user about the
1516                 * situation every 10 ticks.
1517                 */
1518                if (gp->phy_mii.speed == SPEED_100) {
1519                        gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
1520                                DUPLEX_HALF);
1521                        gp->timer_ticks = 5;
1522                        if (netif_msg_link(gp))
1523                                printk(KERN_INFO "%s: switching to forced 10bt\n",
1524                                        gp->dev->name);
1525                        return 0;
1526                } else
1527                        return 1;
1528        default:
1529                return 0;
1530        }
1531}
1532
1533static void gem_link_timer(unsigned long data)
1534{
1535        struct gem *gp = (struct gem *) data;
1536        int restart_aneg = 0;
1537
1538        if (gp->asleep)
1539                return;
1540
1541        spin_lock_irq(&gp->lock);
1542        spin_lock(&gp->tx_lock);
1543        gem_get_cell(gp);
1544
1545        /* If the reset task is still pending, we just
1546         * reschedule the link timer
1547         */
1548        if (gp->reset_task_pending)
1549                goto restart;
1550
1551        if (gp->phy_type == phy_serialink ||
1552            gp->phy_type == phy_serdes) {
1553                u32 val = readl(gp->regs + PCS_MIISTAT);
1554
1555                if (!(val & PCS_MIISTAT_LS))
1556                        val = readl(gp->regs + PCS_MIISTAT);
1557
1558                if ((val & PCS_MIISTAT_LS) != 0) {
1559                        if (gp->lstate == link_up)
1560                                goto restart;
1561
1562                        gp->lstate = link_up;
1563                        netif_carrier_on(gp->dev);
1564                        (void)gem_set_link_modes(gp);
1565                }
1566                goto restart;
1567        }
1568        if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
1569                /* OK, we got a link. If it came from a forced fallback
1570                 * and we were configured for autoneg, we retry a short
1571                 * autoneg pass. If you know your hub is broken, use
1572                 * ethtool ;)
1573                 */
1574                if (gp->lstate == link_force_try && gp->want_autoneg) {
1575                        gp->lstate = link_force_ret;
1576                        gp->last_forced_speed = gp->phy_mii.speed;
1577                        gp->timer_ticks = 5;
1578                        if (netif_msg_link(gp))
1579                                printk(KERN_INFO "%s: Got link after fallback, retrying"
1580                                        " autoneg once...\n", gp->dev->name);
1581                        gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
1582                } else if (gp->lstate != link_up) {
1583                        gp->lstate = link_up;
1584                        netif_carrier_on(gp->dev);
1585                        if (gem_set_link_modes(gp))
1586                                restart_aneg = 1;
1587                }
1588        } else {
1589                /* If the link was previously up, we restart the
1590                 * whole process
1591                 */
1592                if (gp->lstate == link_up) {
1593                        gp->lstate = link_down;
1594                        if (netif_msg_link(gp))
1595                                printk(KERN_INFO "%s: Link down\n",
1596                                        gp->dev->name);
1597                        netif_carrier_off(gp->dev);
1598                        gp->reset_task_pending = 1;
1599                        schedule_work(&gp->reset_task);
1600                        restart_aneg = 1;
1601                } else if (++gp->timer_ticks > 10) {
1602                        if (found_mii_phy(gp))
1603                                restart_aneg = gem_mdio_link_not_up(gp);
1604                        else
1605                                restart_aneg = 1;
1606                }
1607        }
1608        if (restart_aneg) {
1609                gem_begin_auto_negotiation(gp, NULL);
1610                goto out_unlock;
1611        }
1612restart:
1613        mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1614out_unlock:
1615        gem_put_cell(gp);
1616        spin_unlock(&gp->tx_lock);
1617        spin_unlock_irq(&gp->lock);
1618}
1619
1620/* Must be invoked under gp->lock and gp->tx_lock. */
1621static void gem_clean_rings(struct gem *gp)
1622{
1623        struct gem_init_block *gb = gp->init_block;
1624        struct sk_buff *skb;
1625        int i;
1626        dma_addr_t dma_addr;
1627
1628        for (i = 0; i < RX_RING_SIZE; i++) {
1629                struct gem_rxd *rxd;
1630
1631                rxd = &gb->rxd[i];
1632                if (gp->rx_skbs[i] != NULL) {
1633                        skb = gp->rx_skbs[i];
1634                        dma_addr = le64_to_cpu(rxd->buffer);
1635                        pci_unmap_page(gp->pdev, dma_addr,
1636                                       RX_BUF_ALLOC_SIZE(gp),
1637                                       PCI_DMA_FROMDEVICE);
1638                        dev_kfree_skb_any(skb);
1639                        gp->rx_skbs[i] = NULL;
1640                }
1641                rxd->status_word = 0;
1642                wmb();
1643                rxd->buffer = 0;
1644        }
1645
1646        for (i = 0; i < TX_RING_SIZE; i++) {
1647                if (gp->tx_skbs[i] != NULL) {
1648                        struct gem_txd *txd;
1649                        int frag;
1650
1651                        skb = gp->tx_skbs[i];
1652                        gp->tx_skbs[i] = NULL;
1653
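                        /* An skb occupies one descriptor for its linear data
                         * plus one per page fragment; walk them all and
                         * unmap each buffer before freeing the skb.
                         */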
1654                        for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1655                                int ent = i & (TX_RING_SIZE - 1);
1656
1657                                txd = &gb->txd[ent];
1658                                dma_addr = le64_to_cpu(txd->buffer);
1659                                pci_unmap_page(gp->pdev, dma_addr,
1660                                               le64_to_cpu(txd->control_word) &
1661                                               TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
1662
1663                                if (frag != skb_shinfo(skb)->nr_frags)
1664                                        i++;
1665                        }
1666                        dev_kfree_skb_any(skb);
1667                }
1668        }
1669}
1670
1671/* Must be invoked under gp->lock and gp->tx_lock. */
1672static void gem_init_rings(struct gem *gp)
1673{
1674        struct gem_init_block *gb = gp->init_block;
1675        struct net_device *dev = gp->dev;
1676        int i;
1677        dma_addr_t dma_addr;
1678
1679        gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
1680
1681        gem_clean_rings(gp);
1682
1683        gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
1684                            (unsigned)VLAN_ETH_FRAME_LEN);
1685
1686        for (i = 0; i < RX_RING_SIZE; i++) {
1687                struct sk_buff *skb;
1688                struct gem_rxd *rxd = &gb->rxd[i];
1689
1690                skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
1691                if (!skb) {
1692                        rxd->buffer = 0;
1693                        rxd->status_word = 0;
1694                        continue;
1695                }
1696
1697                gp->rx_skbs[i] = skb;
1698                skb->dev = dev;
1699                skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
1700                dma_addr = pci_map_page(gp->pdev,
1701                                        virt_to_page(skb->data),
1702                                        offset_in_page(skb->data),
1703                                        RX_BUF_ALLOC_SIZE(gp),
1704                                        PCI_DMA_FROMDEVICE);
1705                rxd->buffer = cpu_to_le64(dma_addr);
1706                wmb();
1707                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
1708                skb_reserve(skb, RX_OFFSET);
1709        }
1710
1711        for (i = 0; i < TX_RING_SIZE; i++) {
1712                struct gem_txd *txd = &gb->txd[i];
1713
1714                txd->control_word = 0;
1715                wmb();
1716                txd->buffer = 0;
1717        }
1718        wmb();
1719}
1720
1721/* Init PHY interface and start link poll state machine */
1722static void gem_init_phy(struct gem *gp)
1723{
1724        u32 mifcfg;
1725
1726        /* Revert MIF CFG setting done on stop_phy */
1727        mifcfg = readl(gp->regs + MIF_CFG);
1728        mifcfg &= ~MIF_CFG_BBMODE;
1729        writel(mifcfg, gp->regs + MIF_CFG);
1730
1731        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
1732                int i;
1733
1734                /* Those delays suck, the HW seems to love them though. I'll
1735                 * seriously consider breaking some locks here to be able
1736                 * to schedule instead
1737                 */
1738                for (i = 0; i < 3; i++) {
1739#ifdef CONFIG_PPC_PMAC
1740                        pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
1741                        msleep(20);
1742#endif
1743                        /* Some PHYs used by Apple have problems getting back to us,
1744                         * so we do an additional reset here
1745                         */
1746                        phy_write(gp, MII_BMCR, BMCR_RESET);
1747                        msleep(20);
1748                        if (phy_read(gp, MII_BMCR) != 0xffff)
1749                                break;
1750                        if (i == 2)
1751                                printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
1752                                       gp->dev->name);
1753                }
1754        }
1755
1756        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
1757            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1758                u32 val;
1759
1760                /* Init datapath mode register. */
1761                if (gp->phy_type == phy_mii_mdio0 ||
1762                    gp->phy_type == phy_mii_mdio1) {
1763                        val = PCS_DMODE_MGM;
1764                } else if (gp->phy_type == phy_serialink) {
1765                        val = PCS_DMODE_SM | PCS_DMODE_GMOE;
1766                } else {
1767                        val = PCS_DMODE_ESM;
1768                }
1769
1770                writel(val, gp->regs + PCS_DMODE);
1771        }
1772
1773        if (gp->phy_type == phy_mii_mdio0 ||
1774            gp->phy_type == phy_mii_mdio1) {
1775                // XXX check for errors
1776                mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
1777
1778                /* Init PHY */
1779                if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1780                        gp->phy_mii.def->ops->init(&gp->phy_mii);
1781        } else {
1782                gem_pcs_reset(gp);
1783                gem_pcs_reinit_adv(gp);
1784        }
1785
1786        /* Default aneg parameters */
1787        gp->timer_ticks = 0;
1788        gp->lstate = link_down;
1789        netif_carrier_off(gp->dev);
1790
1791        /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
1792        spin_lock_irq(&gp->lock);
1793        gem_begin_auto_negotiation(gp, NULL);
1794        spin_unlock_irq(&gp->lock);
1795}
1796
1797/* Must be invoked under gp->lock and gp->tx_lock. */
1798static void gem_init_dma(struct gem *gp)
1799{
1800        u64 desc_dma = (u64) gp->gblock_dvma;
1801        u32 val;
1802
1803        val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
1804        writel(val, gp->regs + TXDMA_CFG);
1805
1806        writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
1807        writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
1808        desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
1809
1810        writel(0, gp->regs + TXDMA_KICK);
1811
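        /* Note: the (14 / 2) << 13 field below is presumably the RX checksum
         * start offset, i.e. the 14-byte Ethernet header expressed in
         * half-words (an assumption based on the value, not on the docs).
         */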
1812        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1813               ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
1814        writel(val, gp->regs + RXDMA_CFG);
1815
1816        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
1817        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
1818
1819        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1820
1821        val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
1822        val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
1823        writel(val, gp->regs + RXDMA_PTHRESH);
1824
1825        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
1826                writel(((5 & RXDMA_BLANK_IPKTS) |
1827                        ((8 << 12) & RXDMA_BLANK_ITIME)),
1828                       gp->regs + RXDMA_BLANK);
1829        else
1830                writel(((5 & RXDMA_BLANK_IPKTS) |
1831                        ((4 << 12) & RXDMA_BLANK_ITIME)),
1832                       gp->regs + RXDMA_BLANK);
1833}
1834
1835/* Must be invoked under gp->lock and gp->tx_lock. */
1836static u32 gem_setup_multicast(struct gem *gp)
1837{
1838        u32 rxcfg = 0;
1839        int i;
1840
1841        if ((gp->dev->flags & IFF_ALLMULTI) ||
1842            (gp->dev->mc_count > 256)) {
1843                for (i=0; i<16; i++)
1844                        writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1845                rxcfg |= MAC_RXCFG_HFE;
1846        } else if (gp->dev->flags & IFF_PROMISC) {
1847                rxcfg |= MAC_RXCFG_PROM;
1848        } else {
1849                u16 hash_table[16];
1850                u32 crc;
1851                struct dev_mc_list *dmi = gp->dev->mc_list;
1852                int i;
1853
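                /* The chip hashes multicast addresses into a 256-bit table
                 * spread over 16 16-bit registers; the top 8 bits of the
                 * little-endian CRC pick the bit.  Purely as an illustration
                 * (not from the GEM docs): a CRC whose top byte is 0x4a
                 * would set bit (15 - 0xa) of hash_table[0x4].
                 */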
1854                for (i = 0; i < 16; i++)
1855                        hash_table[i] = 0;
1856
1857                for (i = 0; i < gp->dev->mc_count; i++) {
1858                        char *addrs = dmi->dmi_addr;
1859
1860                        dmi = dmi->next;
1861
1862                        if (!(*addrs & 1))
1863                                continue;
1864
1865                        crc = ether_crc_le(6, addrs);
1866                        crc >>= 24;
1867                        hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
1868                }
1869                for (i=0; i<16; i++)
1870                        writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1871                rxcfg |= MAC_RXCFG_HFE;
1872        }
1873
1874        return rxcfg;
1875}
1876
1877/* Must be invoked under gp->lock and gp->tx_lock. */
1878static void gem_init_mac(struct gem *gp)
1879{
1880        unsigned char *e = &gp->dev->dev_addr[0];
1881
1882        writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
1883
1884        writel(0x00, gp->regs + MAC_IPG0);
1885        writel(0x08, gp->regs + MAC_IPG1);
1886        writel(0x04, gp->regs + MAC_IPG2);
1887        writel(0x40, gp->regs + MAC_STIME);
1888        writel(0x40, gp->regs + MAC_MINFSZ);
1889
1890        /* Ethernet payload + header + FCS + optional VLAN tag. */
1891        writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
1892
1893        writel(0x07, gp->regs + MAC_PASIZE);
1894        writel(0x04, gp->regs + MAC_JAMSIZE);
1895        writel(0x10, gp->regs + MAC_ATTLIM);
1896        writel(0x8808, gp->regs + MAC_MCTYPE);
1897
1898        writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
1899
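        /* The station address is loaded 16 bits at a time, low-order word
         * first: for an address aa:bb:cc:dd:ee:ff (illustrative), ADDR0
         * gets ee:ff, ADDR1 cc:dd and ADDR2 aa:bb.
         */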
1900        writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
1901        writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
1902        writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
1903
1904        writel(0, gp->regs + MAC_ADDR3);
1905        writel(0, gp->regs + MAC_ADDR4);
1906        writel(0, gp->regs + MAC_ADDR5);
1907
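        /* ADDR6..8 appear to hold the 802.3x flow-control multicast address
         * 01:80:c2:00:00:01 in the same low-word-first layout.
         */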
1908        writel(0x0001, gp->regs + MAC_ADDR6);
1909        writel(0xc200, gp->regs + MAC_ADDR7);
1910        writel(0x0180, gp->regs + MAC_ADDR8);
1911
1912        writel(0, gp->regs + MAC_AFILT0);
1913        writel(0, gp->regs + MAC_AFILT1);
1914        writel(0, gp->regs + MAC_AFILT2);
1915        writel(0, gp->regs + MAC_AF21MSK);
1916        writel(0, gp->regs + MAC_AF0MSK);
1917
1918        gp->mac_rx_cfg = gem_setup_multicast(gp);
1919#ifdef STRIP_FCS
1920        gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
1921#endif
1922        writel(0, gp->regs + MAC_NCOLL);
1923        writel(0, gp->regs + MAC_FASUCC);
1924        writel(0, gp->regs + MAC_ECOLL);
1925        writel(0, gp->regs + MAC_LCOLL);
1926        writel(0, gp->regs + MAC_DTIMER);
1927        writel(0, gp->regs + MAC_PATMPS);
1928        writel(0, gp->regs + MAC_RFCTR);
1929        writel(0, gp->regs + MAC_LERR);
1930        writel(0, gp->regs + MAC_AERR);
1931        writel(0, gp->regs + MAC_FCSERR);
1932        writel(0, gp->regs + MAC_RXCVERR);
1933
1934        /* Clear RX/TX/MAC/XIF config, we will set these up and enable
1935         * them once a link is established.
1936         */
1937        writel(0, gp->regs + MAC_TXCFG);
1938        writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
1939        writel(0, gp->regs + MAC_MCCFG);
1940        writel(0, gp->regs + MAC_XIFCFG);
1941
1942        /* Setup MAC interrupts.  We want to get all of the interesting
1943         * counter expiration events, but we do not want to hear about
1944         * normal rx/tx as the DMA engine tells us that.
1945         */
1946        writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
1947        writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
1948
1949        /* Don't enable even the PAUSE interrupts for now, we
1950         * make no use of those events other than to record them.
1951         */
1952        writel(0xffffffff, gp->regs + MAC_MCMASK);
1953
1954        /* Don't enable GEM's WOL in normal operations
1955         */
1956        if (gp->has_wol)
1957                writel(0, gp->regs + WOL_WAKECSR);
1958}
1959
1960/* Must be invoked under gp->lock and gp->tx_lock. */
1961static void gem_init_pause_thresholds(struct gem *gp)
1962{
1963        u32 cfg;
1964
1965        /* Calculate pause thresholds.  Setting the OFF threshold to the
1966         * full RX FIFO size effectively disables PAUSE generation, which
1967         * is what we do for 10/100-only GEMs whose FIFOs are too small
1968         * to make real gains from PAUSE.
1969         */
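        /* Illustrative example (assumed default 1500-byte MTU, so
         * rx_buf_sz = 1518, and a 20 KB RX FIFO): max_frame rounds to
         * 1536 bytes, giving off = 20480 - 2*1536 = 17408 and
         * on = 17408 - 1536 = 15872 bytes.
         */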
1970        if (gp->rx_fifo_sz <= (2 * 1024)) {
1971                gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
1972        } else {
1973                int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
1974                int off = (gp->rx_fifo_sz - (max_frame * 2));
1975                int on = off - max_frame;
1976
1977                gp->rx_pause_off = off;
1978                gp->rx_pause_on = on;
1979        }
1980
1981
1982        /* Configure the chip "burst" DMA mode & enable some
1983         * HW bug fixes on the Apple version
1984         */
1985        cfg  = 0;
1986        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
1987                cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
1988#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
1989        cfg |= GREG_CFG_IBURST;
1990#endif
1991        cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
1992        cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
1993        writel(cfg, gp->regs + GREG_CFG);
1994
1995        /* If Infinite Burst didn't stick, then use different
1996         * thresholds (and Apple bug fixes don't exist)
1997         */
1998        if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
1999                cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
2000                cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
2001                writel(cfg, gp->regs + GREG_CFG);
2002        }
2003}
2004
2005static int gem_check_invariants(struct gem *gp)
2006{
2007        struct pci_dev *pdev = gp->pdev;
2008        u32 mif_cfg;
2009
2010        /* On Apple's sungem, we can't rely on registers as the chip
2011         * has been powered down by the firmware. The PHY is looked
2012         * up later on.
2013         */
2014        if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
2015                gp->phy_type = phy_mii_mdio0;
2016                gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2017                gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2018                gp->swrst_base = 0;
2019
2020                mif_cfg = readl(gp->regs + MIF_CFG);
2021                mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
2022                mif_cfg |= MIF_CFG_MDI0;
2023                writel(mif_cfg, gp->regs + MIF_CFG);
2024                writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
2025                writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
2026
2027                /* We hard-code the PHY address so we can properly bring it out of
2028                 * reset later on; we can't really probe it at this point, though
2029                 * that isn't an issue.
2030                 */
2031                if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
2032                        gp->mii_phy_addr = 1;
2033                else
2034                        gp->mii_phy_addr = 0;
2035
2036                return 0;
2037        }
2038
2039        mif_cfg = readl(gp->regs + MIF_CFG);
2040
2041        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2042            pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
2043                /* One of the MII PHYs _must_ be present
2044                 * as this chip has no gigabit PHY.
2045                 */
2046                if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
2047                        printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
2048                               mif_cfg);
2049                        return -1;
2050                }
2051        }
2052
2053        /* Determine initial PHY interface type guess.  MDIO1 is the
2054         * external PHY and thus takes precedence over MDIO0.
2055         */
2056
2057        if (mif_cfg & MIF_CFG_MDI1) {
2058                gp->phy_type = phy_mii_mdio1;
2059                mif_cfg |= MIF_CFG_PSELECT;
2060                writel(mif_cfg, gp->regs + MIF_CFG);
2061        } else if (mif_cfg & MIF_CFG_MDI0) {
2062                gp->phy_type = phy_mii_mdio0;
2063                mif_cfg &= ~MIF_CFG_PSELECT;
2064                writel(mif_cfg, gp->regs + MIF_CFG);
2065        } else {
2066#ifdef CONFIG_SPARC
2067                const char *p;
2068
2069                p = of_get_property(gp->of_node, "shared-pins", NULL);
2070                if (p && !strcmp(p, "serdes"))
2071                        gp->phy_type = phy_serdes;
2072                else
2073#endif
2074                        gp->phy_type = phy_serialink;
2075        }
2076        if (gp->phy_type == phy_mii_mdio1 ||
2077            gp->phy_type == phy_mii_mdio0) {
2078                int i;
2079
2080                for (i = 0; i < 32; i++) {
2081                        gp->mii_phy_addr = i;
2082                        if (phy_read(gp, MII_BMCR) != 0xffff)
2083                                break;
2084                }
2085                if (i == 32) {
2086                        if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
2087                                printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
2088                                return -1;
2089                        }
2090                        gp->phy_type = phy_serdes;
2091                }
2092        }
2093
2094        /* Fetch the FIFO configurations now too. */
2095        gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2096        gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2097
2098        if (pdev->vendor == PCI_VENDOR_ID_SUN) {
2099                if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
2100                        if (gp->tx_fifo_sz != (9 * 1024) ||
2101                            gp->rx_fifo_sz != (20 * 1024)) {
2102                                printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2103                                       gp->tx_fifo_sz, gp->rx_fifo_sz);
2104                                return -1;
2105                        }
2106                        gp->swrst_base = 0;
2107                } else {
2108                        if (gp->tx_fifo_sz != (2 * 1024) ||
2109                            gp->rx_fifo_sz != (2 * 1024)) {
2110                                printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2111                                       gp->tx_fifo_sz, gp->rx_fifo_sz);
2112                                return -1;
2113                        }
2114                        gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
2115                }
2116        }
2117
2118        return 0;
2119}
2120
2121/* Must be invoked under gp->lock and gp->tx_lock. */
2122static void gem_reinit_chip(struct gem *gp)
2123{
2124        /* Reset the chip */
2125        gem_reset(gp);
2126
2127        /* Make sure ints are disabled */
2128        gem_disable_ints(gp);
2129
2130        /* Allocate & setup ring buffers */
2131        gem_init_rings(gp);
2132
2133        /* Configure pause thresholds */
2134        gem_init_pause_thresholds(gp);
2135
2136        /* Init DMA & MAC engines */
2137        gem_init_dma(gp);
2138        gem_init_mac(gp);
2139}
2140
2141
2142/* Must be invoked with no lock held. */
2143static void gem_stop_phy(struct gem *gp, int wol)
2144{
2145        u32 mifcfg;
2146        unsigned long flags;
2147
2148        /* Let the chip settle down a bit, that seems to help
2149         * with sleep mode on some models
2150         */
2151        msleep(10);
2152
2153        /* Make sure we aren't polling PHY status changes. We
2154         * don't currently use that feature though
2155         */
2156        mifcfg = readl(gp->regs + MIF_CFG);
2157        mifcfg &= ~MIF_CFG_POLL;
2158        writel(mifcfg, gp->regs + MIF_CFG);
2159
2160        if (wol && gp->has_wol) {
2161                unsigned char *e = &gp->dev->dev_addr[0];
2162                u32 csr;
2163
2164                /* Setup wake-on-lan for MAGIC packet */
2165                writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
2166                       gp->regs + MAC_RXCFG);
2167                writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
2168                writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
2169                writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
2170
2171                writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
2172                csr = WOL_WAKECSR_ENABLE;
2173                if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
2174                        csr |= WOL_WAKECSR_MII;
2175                writel(csr, gp->regs + WOL_WAKECSR);
2176        } else {
2177                writel(0, gp->regs + MAC_RXCFG);
2178                (void)readl(gp->regs + MAC_RXCFG);
2179                /* Machine sleep will die in strange ways if we
2180                 * don't wait a bit here; it looks like the chip takes
2181                 * some time to really shut down
2182                 */
2183                msleep(10);
2184        }
2185
2186        writel(0, gp->regs + MAC_TXCFG);
2187        writel(0, gp->regs + MAC_XIFCFG);
2188        writel(0, gp->regs + TXDMA_CFG);
2189        writel(0, gp->regs + RXDMA_CFG);
2190
2191        if (!wol) {
2192                spin_lock_irqsave(&gp->lock, flags);
2193                spin_lock(&gp->tx_lock);
2194                gem_reset(gp);
2195                writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
2196                writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
2197                spin_unlock(&gp->tx_lock);
2198                spin_unlock_irqrestore(&gp->lock, flags);
2199
2200                /* No need to take the lock here */
2201
2202                if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
2203                        gp->phy_mii.def->ops->suspend(&gp->phy_mii);
2204
2205                /* According to Apple, we must set the MDIO pins to this benign
2206                 * state or we may 1) eat more current, 2) damage some PHYs
2207                 */
2208                writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
2209                writel(0, gp->regs + MIF_BBCLK);
2210                writel(0, gp->regs + MIF_BBDATA);
2211                writel(0, gp->regs + MIF_BBOENAB);
2212                writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
2213                (void) readl(gp->regs + MAC_XIFCFG);
2214        }
2215}
2216
2217
2218static int gem_do_start(struct net_device *dev)
2219{
2220        struct gem *gp = netdev_priv(dev);
2221        unsigned long flags;
2222
2223        spin_lock_irqsave(&gp->lock, flags);
2224        spin_lock(&gp->tx_lock);
2225
2226        /* Enable the cell */
2227        gem_get_cell(gp);
2228
2229        /* Init & setup chip hardware */
2230        gem_reinit_chip(gp);
2231
2232        gp->running = 1;
2233
2234        napi_enable(&gp->napi);
2235
2236        if (gp->lstate == link_up) {
2237                netif_carrier_on(gp->dev);
2238                gem_set_link_modes(gp);
2239        }
2240
2241        netif_wake_queue(gp->dev);
2242
2243        spin_unlock(&gp->tx_lock);
2244        spin_unlock_irqrestore(&gp->lock, flags);
2245
2246        if (request_irq(gp->pdev->irq, gem_interrupt,
2247                                   IRQF_SHARED, dev->name, (void *)dev)) {
2248                printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
2249
2250                spin_lock_irqsave(&gp->lock, flags);
2251                spin_lock(&gp->tx_lock);
2252
2253                napi_disable(&gp->napi);
2254
2255                gp->running =  0;
2256                gem_reset(gp);
2257                gem_clean_rings(gp);
2258                gem_put_cell(gp);
2259
2260                spin_unlock(&gp->tx_lock);
2261                spin_unlock_irqrestore(&gp->lock, flags);
2262
2263                return -EAGAIN;
2264        }
2265
2266        return 0;
2267}
2268
2269static void gem_do_stop(struct net_device *dev, int wol)
2270{
2271        struct gem *gp = netdev_priv(dev);
2272        unsigned long flags;
2273
2274        spin_lock_irqsave(&gp->lock, flags);
2275        spin_lock(&gp->tx_lock);
2276
2277        gp->running = 0;
2278
2279        /* Stop netif queue */
2280        netif_stop_queue(dev);
2281
2282        /* Make sure ints are disabled */
2283        gem_disable_ints(gp);
2284
2285        /* We can drop the lock now */
2286        spin_unlock(&gp->tx_lock);
2287        spin_unlock_irqrestore(&gp->lock, flags);
2288
2289        /* If we are going to sleep with WOL */
2290        gem_stop_dma(gp);
2291        msleep(10);
2292        if (!wol)
2293                gem_reset(gp);
2294        msleep(10);
2295
2296        /* Get rid of rings */
2297        gem_clean_rings(gp);
2298
2299        /* No irq needed anymore */
2300        free_irq(gp->pdev->irq, (void *) dev);
2301
2302        /* Cell not needed either if there's no WOL */
2303        if (!wol) {
2304                spin_lock_irqsave(&gp->lock, flags);
2305                gem_put_cell(gp);
2306                spin_unlock_irqrestore(&gp->lock, flags);
2307        }
2308}
2309
2310static void gem_reset_task(struct work_struct *work)
2311{
2312        struct gem *gp = container_of(work, struct gem, reset_task);
2313
2314        mutex_lock(&gp->pm_mutex);
2315
2316        if (gp->opened)
2317                napi_disable(&gp->napi);
2318
2319        spin_lock_irq(&gp->lock);
2320        spin_lock(&gp->tx_lock);
2321
2322        if (gp->running) {
2323                netif_stop_queue(gp->dev);
2324
2325                /* Reset the chip & rings */
2326                gem_reinit_chip(gp);
2327                if (gp->lstate == link_up)
2328                        gem_set_link_modes(gp);
2329                netif_wake_queue(gp->dev);
2330        }
2331
2332        gp->reset_task_pending = 0;
2333
2334        spin_unlock(&gp->tx_lock);
2335        spin_unlock_irq(&gp->lock);
2336
2337        if (gp->opened)
2338                napi_enable(&gp->napi);
2339
2340        mutex_unlock(&gp->pm_mutex);
2341}
2342
2343
2344static int gem_open(struct net_device *dev)
2345{
2346        struct gem *gp = netdev_priv(dev);
2347        int rc = 0;
2348
2349        mutex_lock(&gp->pm_mutex);
2350
2351        /* We need the cell enabled */
2352        if (!gp->asleep)
2353                rc = gem_do_start(dev);
2354        gp->opened = (rc == 0);
2355
2356        mutex_unlock(&gp->pm_mutex);
2357
2358        return rc;
2359}
2360
2361static int gem_close(struct net_device *dev)
2362{
2363        struct gem *gp = netdev_priv(dev);
2364
2365        mutex_lock(&gp->pm_mutex);
2366
2367        napi_disable(&gp->napi);
2368
2369        gp->opened = 0;
2370        if (!gp->asleep)
2371                gem_do_stop(dev, 0);
2372
2373        mutex_unlock(&gp->pm_mutex);
2374
2375        return 0;
2376}
2377
2378#ifdef CONFIG_PM
2379static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2380{
2381        struct net_device *dev = pci_get_drvdata(pdev);
2382        struct gem *gp = netdev_priv(dev);
2383        unsigned long flags;
2384
2385        mutex_lock(&gp->pm_mutex);
2386
2387        printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
2388               dev->name,
2389               (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2390
2391        /* Keep the cell enabled during the entire operation */
2392        spin_lock_irqsave(&gp->lock, flags);
2393        spin_lock(&gp->tx_lock);
2394        gem_get_cell(gp);
2395        spin_unlock(&gp->tx_lock);
2396        spin_unlock_irqrestore(&gp->lock, flags);
2397
2398        /* If the driver is opened, we stop the MAC */
2399        if (gp->opened) {
2400                napi_disable(&gp->napi);
2401
2402                /* Stop traffic, mark us closed */
2403                netif_device_detach(dev);
2404
2405                /* Switch off MAC, remember WOL setting */
2406                gp->asleep_wol = gp->wake_on_lan;
2407                gem_do_stop(dev, gp->asleep_wol);
2408        } else
2409                gp->asleep_wol = 0;
2410
2411        /* Mark us asleep */
2412        gp->asleep = 1;
2413        wmb();
2414
2415        /* Stop the link timer */
2416        del_timer_sync(&gp->link_timer);
2417
2418        /* Now we release the mutex so we don't block the reset task, which
2419         * can take it too. We are marked asleep, so there will be no
2420         * conflict here
2421         */
2422        mutex_unlock(&gp->pm_mutex);
2423
2424        /* Wait for a pending reset task to complete */
2425        while (gp->reset_task_pending)
2426                yield();
2427        flush_scheduled_work();
2428
2429        /* Shut the PHY down and set up WOL if needed */
2430        gem_stop_phy(gp, gp->asleep_wol);
2431
2432        /* Make sure bus master is disabled */
2433        pci_disable_device(gp->pdev);
2434
2435        /* Release the cell, no need to take a lock at this point since
2436         * nothing else can happen now
2437         */
2438        gem_put_cell(gp);
2439
2440        return 0;
2441}
2442
2443static int gem_resume(struct pci_dev *pdev)
2444{
2445        struct net_device *dev = pci_get_drvdata(pdev);
2446        struct gem *gp = netdev_priv(dev);
2447        unsigned long flags;
2448
2449        printk(KERN_INFO "%s: resuming\n", dev->name);
2450
2451        mutex_lock(&gp->pm_mutex);
2452
2453        /* Keep the cell enabled during the entire operation; no need to
2454         * take a lock here though, since nothing else can happen while we
2455         * are marked asleep
2456         */
2457        gem_get_cell(gp);
2458
2459        /* Make sure PCI access and bus master are enabled */
2460        if (pci_enable_device(gp->pdev)) {
2461                printk(KERN_ERR "%s: Can't re-enable chip !\n",
2462                       dev->name);
2463                /* Put the cell and forget it for now; it will be considered
2464                 * still asleep, and a new sleep cycle may bring it back
2465                 */
2466                gem_put_cell(gp);
2467                mutex_unlock(&gp->pm_mutex);
2468                return 0;
2469        }
2470        pci_set_master(gp->pdev);
2471
2472        /* Reset everything */
2473        gem_reset(gp);
2474
2475        /* Mark us woken up */
2476        gp->asleep = 0;
2477        wmb();
2478
2479        /* Bring the PHY back. Again, lock is useless at this point as
2480         * nothing can be happening until we restart the whole thing
2481         */
2482        gem_init_phy(gp);
2483
2484        /* If we were opened, bring everything back */
2485        if (gp->opened) {
2486                /* Restart MAC */
2487                gem_do_start(dev);
2488
2489                /* Re-attach net device */
2490                netif_device_attach(dev);
2491        }
2492
2493        spin_lock_irqsave(&gp->lock, flags);
2494        spin_lock(&gp->tx_lock);
2495
2496        /* If we had WOL enabled, the cell clock was never turned off during
2497         * sleep, so we end up being unbalanced. Fix that here
2498         */
2499        if (gp->asleep_wol)
2500                gem_put_cell(gp);
2501
2502        /* This function doesn't need to hold the cell; if the driver is
2503         * open, gem_do_start() holds it.
2504         */
2505        gem_put_cell(gp);
2506
2507        spin_unlock(&gp->tx_lock);
2508        spin_unlock_irqrestore(&gp->lock, flags);
2509
2510        mutex_unlock(&gp->pm_mutex);
2511
2512        return 0;
2513}
2514#endif /* CONFIG_PM */
2515
2516static struct net_device_stats *gem_get_stats(struct net_device *dev)
2517{
2518        struct gem *gp = netdev_priv(dev);
2519        struct net_device_stats *stats = &gp->net_stats;
2520
2521        spin_lock_irq(&gp->lock);
2522        spin_lock(&gp->tx_lock);
2523
2524        /* I have seen this being called while PM was in progress,
2525         * so we shield against that case
2526         */
2527        if (gp->running) {
2528                stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2529                writel(0, gp->regs + MAC_FCSERR);
2530
2531                stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
2532                writel(0, gp->regs + MAC_AERR);
2533
2534                stats->rx_length_errors += readl(gp->regs + MAC_LERR);
2535                writel(0, gp->regs + MAC_LERR);
2536
2537                stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2538                stats->collisions +=
2539                        (readl(gp->regs + MAC_ECOLL) +
2540                         readl(gp->regs + MAC_LCOLL));
2541                writel(0, gp->regs + MAC_ECOLL);
2542                writel(0, gp->regs + MAC_LCOLL);
2543        }
2544
2545        spin_unlock(&gp->tx_lock);
2546        spin_unlock_irq(&gp->lock);
2547
2548        return &gp->net_stats;
2549}
2550
2551static int gem_set_mac_address(struct net_device *dev, void *addr)
2552{
2553        struct sockaddr *macaddr = (struct sockaddr *) addr;
2554        struct gem *gp = netdev_priv(dev);
2555        unsigned char *e = &dev->dev_addr[0];
2556
2557        if (!is_valid_ether_addr(macaddr->sa_data))
2558                return -EADDRNOTAVAIL;
2559
2560        if (!netif_running(dev) || !netif_device_present(dev)) {
2561                /* We'll just catch it later when the
2562                 * device is brought up or resumed.
2563                 */
2564                memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
2565                return 0;
2566        }
2567
2568        mutex_lock(&gp->pm_mutex);
2569        memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
2570        if (gp->running) {
2571                writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
2572                writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
2573                writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
2574        }
2575        mutex_unlock(&gp->pm_mutex);
2576
2577        return 0;
2578}
2579
2580static void gem_set_multicast(struct net_device *dev)
2581{
2582        struct gem *gp = netdev_priv(dev);
2583        u32 rxcfg, rxcfg_new;
2584        int limit = 10000;
2585
2586
2587        spin_lock_irq(&gp->lock);
2588        spin_lock(&gp->tx_lock);
2589
2590        if (!gp->running)
2591                goto bail;
2592
2593        netif_stop_queue(dev);
2594
2595        rxcfg = readl(gp->regs + MAC_RXCFG);
2596        rxcfg_new = gem_setup_multicast(gp);
2597#ifdef STRIP_FCS
2598        rxcfg_new |= MAC_RXCFG_SFCS;
2599#endif
2600        gp->mac_rx_cfg = rxcfg_new;
2601
2602        writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
2603        while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2604                if (!limit--)
2605                        break;
2606                udelay(10);
2607        }
2608
2609        rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
2610        rxcfg |= rxcfg_new;
2611
2612        writel(rxcfg, gp->regs + MAC_RXCFG);
2613
2614        netif_wake_queue(dev);
2615
2616 bail:
2617        spin_unlock(&gp->tx_lock);
2618        spin_unlock_irq(&gp->lock);
2619}
2620
2621/* Jumbo-grams don't seem to work :-( */
2622#define GEM_MIN_MTU     68
2623#if 1
2624#define GEM_MAX_MTU     1500
2625#else
2626#define GEM_MAX_MTU     9000
2627#endif
2628
2629static int gem_change_mtu(struct net_device *dev, int new_mtu)
2630{
2631        struct gem *gp = netdev_priv(dev);
2632
2633        if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
2634                return -EINVAL;
2635
2636        if (!netif_running(dev) || !netif_device_present(dev)) {
2637                /* We'll just catch it later when the
2638                 * device is brought up or resumed.
2639                 */
2640                dev->mtu = new_mtu;
2641                return 0;
2642        }
2643
2644        mutex_lock(&gp->pm_mutex);
2645        spin_lock_irq(&gp->lock);
2646        spin_lock(&gp->tx_lock);
2647        dev->mtu = new_mtu;
2648        if (gp->running) {
2649                gem_reinit_chip(gp);
2650                if (gp->lstate == link_up)
2651                        gem_set_link_modes(gp);
2652        }
2653        spin_unlock(&gp->tx_lock);
2654        spin_unlock_irq(&gp->lock);
2655        mutex_unlock(&gp->pm_mutex);
2656
2657        return 0;
2658}
2659
2660static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2661{
2662        struct gem *gp = netdev_priv(dev);
2663
2664        strcpy(info->driver, DRV_NAME);
2665        strcpy(info->version, DRV_VERSION);
2666        strcpy(info->bus_info, pci_name(gp->pdev));
2667}
2668
2669static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2670{
2671        struct gem *gp = netdev_priv(dev);
2672
2673        if (gp->phy_type == phy_mii_mdio0 ||
2674            gp->phy_type == phy_mii_mdio1) {
2675                if (gp->phy_mii.def)
2676                        cmd->supported = gp->phy_mii.def->features;
2677                else
2678                        cmd->supported = (SUPPORTED_10baseT_Half |
2679                                          SUPPORTED_10baseT_Full);
2680
2681                /* XXX hardcoded stuff for now */
2682                cmd->port = PORT_MII;
2683                cmd->transceiver = XCVR_EXTERNAL;
2684                cmd->phy_address = 0; /* XXX fixed PHYAD */
2685
2686                /* Return current PHY settings */
2687                spin_lock_irq(&gp->lock);
2688                cmd->autoneg = gp->want_autoneg;
2689                cmd->speed = gp->phy_mii.speed;
2690                cmd->duplex = gp->phy_mii.duplex;
2691                cmd->advertising = gp->phy_mii.advertising;
2692
2693                /* If we started with a forced mode, we don't have a default
2694                 * advertise set, so we need to return something sensible so
2695                 * that userland can re-enable autoneg properly.
2696                 */
2697                if (cmd->advertising == 0)
2698                        cmd->advertising = cmd->supported;
2699                spin_unlock_irq(&gp->lock);
2700        } else { // XXX PCS ?
2701                cmd->supported =
2702                        (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2703                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2704                         SUPPORTED_Autoneg);
2705                cmd->advertising = cmd->supported;
2706                cmd->speed = 0;
2707                cmd->duplex = cmd->port = cmd->phy_address =
2708                        cmd->transceiver = cmd->autoneg = 0;
2709
2710                /* serdes usually means a Fibre connector, with most settings fixed */
2711                if (gp->phy_type == phy_serdes) {
2712                        cmd->port = PORT_FIBRE;
2713                        cmd->supported = (SUPPORTED_1000baseT_Half |
2714                                SUPPORTED_1000baseT_Full |
2715                                SUPPORTED_FIBRE | SUPPORTED_Autoneg |
2716                                SUPPORTED_Pause | SUPPORTED_Asym_Pause);
2717                        cmd->advertising = cmd->supported;
2718                        cmd->transceiver = XCVR_INTERNAL;
2719                        if (gp->lstate == link_up)
2720                                cmd->speed = SPEED_1000;
2721                        cmd->duplex = DUPLEX_FULL;
2722                        cmd->autoneg = 1;
2723                }
2724        }
2725        cmd->maxtxpkt = cmd->maxrxpkt = 0;
2726
2727        return 0;
2728}
2729
2730static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2731{
2732        struct gem *gp = netdev_priv(dev);
2733
2734        /* Verify the settings we care about. */
2735        if (cmd->autoneg != AUTONEG_ENABLE &&
2736            cmd->autoneg != AUTONEG_DISABLE)
2737                return -EINVAL;
2738
2739        if (cmd->autoneg == AUTONEG_ENABLE &&
2740            cmd->advertising == 0)
2741                return -EINVAL;
2742
2743        if (cmd->autoneg == AUTONEG_DISABLE &&
2744            ((cmd->speed != SPEED_1000 &&
2745              cmd->speed != SPEED_100 &&
2746              cmd->speed != SPEED_10) ||
2747             (cmd->duplex != DUPLEX_HALF &&
2748              cmd->duplex != DUPLEX_FULL)))
2749                return -EINVAL;
2750
2751        /* Apply settings and restart link process. */
2752        spin_lock_irq(&gp->lock);
2753        gem_get_cell(gp);
2754        gem_begin_auto_negotiation(gp, cmd);
2755        gem_put_cell(gp);
2756        spin_unlock_irq(&gp->lock);
2757
2758        return 0;
2759}
2760
2761static int gem_nway_reset(struct net_device *dev)
2762{
2763        struct gem *gp = netdev_priv(dev);
2764
2765        if (!gp->want_autoneg)
2766                return -EINVAL;
2767
2768        /* Restart link process. */
2769        spin_lock_irq(&gp->lock);
2770        gem_get_cell(gp);
2771        gem_begin_auto_negotiation(gp, NULL);
2772        gem_put_cell(gp);
2773        spin_unlock_irq(&gp->lock);
2774
2775        return 0;
2776}
2777
2778static u32 gem_get_msglevel(struct net_device *dev)
2779{
2780        struct gem *gp = netdev_priv(dev);
2781        return gp->msg_enable;
2782}
2783
2784static void gem_set_msglevel(struct net_device *dev, u32 value)
2785{
2786        struct gem *gp = netdev_priv(dev);
2787        gp->msg_enable = value;
2788}
2789
2790
2791/* Add more when I understand how to program the chip */
2792/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
2793
2794#define WOL_SUPPORTED_MASK      (WAKE_MAGIC)
2795
2796static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2797{
2798        struct gem *gp = netdev_priv(dev);
2799
2800        /* Add more when I understand how to program the chip */
2801        if (gp->has_wol) {
2802                wol->supported = WOL_SUPPORTED_MASK;
2803                wol->wolopts = gp->wake_on_lan;
2804        } else {
2805                wol->supported = 0;
2806                wol->wolopts = 0;
2807        }
2808}
2809
2810static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2811{
2812        struct gem *gp = netdev_priv(dev);
2813
2814        if (!gp->has_wol)
2815                return -EOPNOTSUPP;
2816        gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
2817        return 0;
2818}
2819
2820static const struct ethtool_ops gem_ethtool_ops = {
2821        .get_drvinfo            = gem_get_drvinfo,
2822        .get_link               = ethtool_op_get_link,
2823        .get_settings           = gem_get_settings,
2824        .set_settings           = gem_set_settings,
2825        .nway_reset             = gem_nway_reset,
2826        .get_msglevel           = gem_get_msglevel,
2827        .set_msglevel           = gem_set_msglevel,
2828        .get_wol                = gem_get_wol,
2829        .set_wol                = gem_set_wol,
2830};
2831
2832static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2833{
2834        struct gem *gp = netdev_priv(dev);
2835        struct mii_ioctl_data *data = if_mii(ifr);
2836        int rc = -EOPNOTSUPP;
2837        unsigned long flags;
2838
2839        /* Hold the PM mutex while doing ioctls or we may collide
2840         * with power management.
2841         */
2842        mutex_lock(&gp->pm_mutex);
2843
2844        spin_lock_irqsave(&gp->lock, flags);
2845        gem_get_cell(gp);
2846        spin_unlock_irqrestore(&gp->lock, flags);
2847
2848        switch (cmd) {
2849        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
2850                data->phy_id = gp->mii_phy_addr;
2851                /* Fallthrough... */
2852
2853        case SIOCGMIIREG:               /* Read MII PHY register. */
2854                if (!gp->running)
2855                        rc = -EAGAIN;
2856                else {
2857                        data->val_out = __phy_read(gp, data->phy_id & 0x1f,
2858                                                   data->reg_num & 0x1f);
2859                        rc = 0;
2860                }
2861                break;
2862
2863        case SIOCSMIIREG:               /* Write MII PHY register. */
2864                if (!gp->running)
2865                        rc = -EAGAIN;
2866                else {
2867                        __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2868                                    data->val_in);
2869                        rc = 0;
2870                }
2871                break;
2872        }
2873
2874        spin_lock_irqsave(&gp->lock, flags);
2875        gem_put_cell(gp);
2876        spin_unlock_irqrestore(&gp->lock, flags);
2877
2878        mutex_unlock(&gp->pm_mutex);
2879
2880        return rc;
2881}
2882
2883#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
2884/* Fetch MAC address from vital product data of PCI ROM. */
2885static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
2886{
2887        int this_offset;
2888
2889        for (this_offset = 0x20; this_offset < len; this_offset++) {
2890                void __iomem *p = rom_base + this_offset;
2891                int i;
2892
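                /* Scan for what looks like a PCI VPD "NA" (network address)
                 * keyword with a 6-byte value: 0x4e 0x41 is "NA" and 0x06
                 * its length; the MAC address bytes follow (a descriptive
                 * note, the exact VPD header layout is not re-checked here).
                 */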
2893                if (readb(p + 0) != 0x90 ||
2894                    readb(p + 1) != 0x00 ||
2895                    readb(p + 2) != 0x09 ||
2896                    readb(p + 3) != 0x4e ||
2897                    readb(p + 4) != 0x41 ||
2898                    readb(p + 5) != 0x06)
2899                        continue;
2900
2901                this_offset += 6;
2902                p += 6;
2903
2904                for (i = 0; i < 6; i++)
2905                        dev_addr[i] = readb(p + i);
2906                return 1;
2907        }
2908        return 0;
2909}
2910
2911static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2912{
2913        size_t size;
2914        void __iomem *p = pci_map_rom(pdev, &size);
2915
2916        if (p) {
2917                int found;
2918
2919                found = readb(p) == 0x55 &&
2920                        readb(p + 1) == 0xaa &&
2921                        find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
2922                pci_unmap_rom(pdev, p);
2923                if (found)
2924                        return;
2925        }
2926
2927        /* Sun MAC prefix then 3 random bytes. */
2928        dev_addr[0] = 0x08;
2929        dev_addr[1] = 0x00;
2930        dev_addr[2] = 0x20;
2931        get_random_bytes(dev_addr + 3, 3);
2932        return;
2933}
2934#endif /* not Sparc and not PPC */
2935
2936static int __devinit gem_get_device_address(struct gem *gp)
2937{
2938#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
2939        struct net_device *dev = gp->dev;
2940        const unsigned char *addr;
2941
2942        addr = of_get_property(gp->of_node, "local-mac-address", NULL);
2943        if (addr == NULL) {
2944#ifdef CONFIG_SPARC
2945                addr = idprom->id_ethaddr;
2946#else
2947                printk("\n");
2948                printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
2949                return -1;
2950#endif
2951        }
2952        memcpy(dev->dev_addr, addr, 6);
2953#else
2954        get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2955#endif
2956        return 0;
2957}
2958
2959static void gem_remove_one(struct pci_dev *pdev)
2960{
2961        struct net_device *dev = pci_get_drvdata(pdev);
2962
2963        if (dev) {
2964                struct gem *gp = netdev_priv(dev);
2965
2966                unregister_netdev(dev);
2967
2968                /* Stop the link timer */
2969                del_timer_sync(&gp->link_timer);
2970
2971                /* We shouldn't need any locking here */
2972                gem_get_cell(gp);
2973
2974                /* Wait for a pending reset task to complete */
2975                while (gp->reset_task_pending)
2976                        yield();
2977                flush_scheduled_work();
2978
2979                /* Shut the PHY down */
2980                gem_stop_phy(gp, 0);
2981
2982                gem_put_cell(gp);
2983
2984                /* Make sure bus master is disabled */
2985                pci_disable_device(gp->pdev);
2986
2987                /* Free resources */
2988                pci_free_consistent(pdev,
2989                                    sizeof(struct gem_init_block),
2990                                    gp->init_block,
2991                                    gp->gblock_dvma);
2992                iounmap(gp->regs);
2993                pci_release_regions(pdev);
2994                free_netdev(dev);
2995
2996                pci_set_drvdata(pdev, NULL);
2997        }
2998}
2999
3000static const struct net_device_ops gem_netdev_ops = {
3001        .ndo_open               = gem_open,
3002        .ndo_stop               = gem_close,
3003        .ndo_start_xmit         = gem_start_xmit,
3004        .ndo_get_stats          = gem_get_stats,
3005        .ndo_set_multicast_list = gem_set_multicast,
3006        .ndo_do_ioctl           = gem_ioctl,
3007        .ndo_tx_timeout         = gem_tx_timeout,
3008        .ndo_change_mtu         = gem_change_mtu,
3009        .ndo_validate_addr      = eth_validate_addr,
3010        .ndo_set_mac_address    = gem_set_mac_address,
3011#ifdef CONFIG_NET_POLL_CONTROLLER
3012        .ndo_poll_controller    = gem_poll_controller,
3013#endif
3014};
3015
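    /* Probe one GEM chip.  The sequence below is: enable the PCI device
     * and pick a DMA mask, map the register BAR, allocate the netdev and
     * private state, reset the chip, probe the PHY, allocate the init
     * block, determine the MAC address, and finally register the netdev.
     * Any failure unwinds through the err_out_* labels at the bottom.
     */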
3016static int __devinit gem_init_one(struct pci_dev *pdev,
3017                                  const struct pci_device_id *ent)
3018{
3019        static int gem_version_printed;
3020        unsigned long gemreg_base, gemreg_len;
3021        struct net_device *dev;
3022        struct gem *gp;
3023        int err, pci_using_dac;
3024
3025        if (gem_version_printed++ == 0)
3026                printk(KERN_INFO "%s", version);
3027
3028        /* Apple gmac note: during probe, the chip is powered up by
3029         * the arch code to allow the code below to work (and to let
3030         * the chip be probed via config space). It won't stay powered
3031         * up until the interface is brought up however, so we can't rely
3032         * on register configuration done at this point.
3033         */
3034        err = pci_enable_device(pdev);
3035        if (err) {
3036                printk(KERN_ERR PFX "Cannot enable MMIO operation, "
3037                       "aborting.\n");
3038                return err;
3039        }
3040        pci_set_master(pdev);
3041
3042        /* Configure DMA attributes. */
3043
3044        /* All of the GEM documentation states that 64-bit DMA addressing
3045         * is fully supported and should work just fine.  However the
3046         * front end for RIO based GEMs is different and only supports
3047         * 32-bit addressing.
3048         *
3049         * For now we assume the various PPC GEMs are 32-bit only as well.
3050         */
3051        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
3052            pdev->device == PCI_DEVICE_ID_SUN_GEM &&
3053            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3054                pci_using_dac = 1;
3055        } else {
3056                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3057                if (err) {
3058                        printk(KERN_ERR PFX "No usable DMA configuration, "
3059                               "aborting.\n");
3060                        goto err_disable_device;
3061                }
3062                pci_using_dac = 0;
3063        }
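    /* pci_using_dac is only consulted again at the end of probe, where it
     * decides whether NETIF_F_HIGHDMA is advertised on the netdev.
     */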
3064
3065        gemreg_base = pci_resource_start(pdev, 0);
3066        gemreg_len = pci_resource_len(pdev, 0);
3067
3068        if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3069                printk(KERN_ERR PFX "Cannot find proper PCI device "
3070                       "base address, aborting.\n");
3071                err = -ENODEV;
3072                goto err_disable_device;
3073        }
3074
3075        dev = alloc_etherdev(sizeof(*gp));
3076        if (!dev) {
3077                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
3078                err = -ENOMEM;
3079                goto err_disable_device;
3080        }
3081        SET_NETDEV_DEV(dev, &pdev->dev);
3082
3083        gp = netdev_priv(dev);
3084
3085        err = pci_request_regions(pdev, DRV_NAME);
3086        if (err) {
3087                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
3088                       "aborting.\n");
3089                goto err_out_free_netdev;
3090        }
3091
3092        gp->pdev = pdev;
3093        dev->base_addr = (long) pdev;
3094        gp->dev = dev;
3095
3096        gp->msg_enable = DEFAULT_MSG;
3097
3098        spin_lock_init(&gp->lock);
3099        spin_lock_init(&gp->tx_lock);
3100        mutex_init(&gp->pm_mutex);
3101
3102        init_timer(&gp->link_timer);
3103        gp->link_timer.function = gem_link_timer;
3104        gp->link_timer.data = (unsigned long) gp;
3105
3106        INIT_WORK(&gp->reset_task, gem_reset_task);
3107
3108        gp->lstate = link_down;
3109        gp->timer_ticks = 0;
3110        netif_carrier_off(dev);
3111
3112        gp->regs = ioremap(gemreg_base, gemreg_len);
3113        if (!gp->regs) {
3114                printk(KERN_ERR PFX "Cannot map device registers, "
3115                       "aborting.\n");
3116                err = -EIO;
3117                goto err_out_free_res;
3118        }
3119
3120        /* On Apple, we want a reference to the Open Firmware device-tree
3121         * node. We use it for clock control.
3122         */
3123#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
3124        gp->of_node = pci_device_to_OF_node(pdev);
3125#endif
3126
3127        /* Only Apple version supports WOL afaik */
3128        if (pdev->vendor == PCI_VENDOR_ID_APPLE)
3129                gp->has_wol = 1;
3130
3131        /* Make sure cell is enabled */
3132        gem_get_cell(gp);
3133
3134        /* Make sure everything is stopped and in init state */
3135        gem_reset(gp);
3136
3137        /* Fill up the mii_phy structure (even if we won't use it) */
3138        gp->phy_mii.dev = dev;
3139        gp->phy_mii.mdio_read = _phy_read;
3140        gp->phy_mii.mdio_write = _phy_write;
3141#ifdef CONFIG_PPC_PMAC
3142        gp->phy_mii.platform_data = gp->of_node;
3143#endif
3144        /* By default, we start with autoneg */
3145        gp->want_autoneg = 1;
3146
3147        /* Check fifo sizes, PHY type, etc... */
3148        if (gem_check_invariants(gp)) {
3149                err = -ENODEV;
3150                goto err_out_iounmap;
3151        }
3152
3153        /* It is guaranteed that the returned buffer will be at least
3154         * PAGE_SIZE aligned.
3155         */
3156        gp->init_block = (struct gem_init_block *)
3157                pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3158                                     &gp->gblock_dvma);
3159        if (!gp->init_block) {
3160                printk(KERN_ERR PFX "Cannot allocate init block, "
3161                       "aborting.\n");
3162                err = -ENOMEM;
3163                goto err_out_iounmap;
3164        }
3165
3166        err = gem_get_device_address(gp);
3167        if (err)
                    goto err_out_free_consistent;
3168
3169        dev->netdev_ops = &gem_netdev_ops;
3170        netif_napi_add(dev, &gp->napi, gem_poll, 64);
3171        dev->ethtool_ops = &gem_ethtool_ops;
3172        dev->watchdog_timeo = 5 * HZ;
3173        dev->irq = pdev->irq;
3174        dev->dma = 0;
3175
3176        /* Set that now, in case PM kicks in now */
3177        pci_set_drvdata(pdev, dev);
3178
3179        /* Detect & init PHY, start autoneg */
3180        gem_init_phy(gp);
3181
3182        /* Register with kernel */
3183        err = register_netdev(dev);
3184        if (err) {
3185                printk(KERN_ERR PFX "Cannot register net device, "
3186                       "aborting.\n");
3187                goto err_out_free_consistent;
3188        }
3189
3190        /* Now that registration can no longer fail, release the cell;
3191         * from here on it is managed by whoever needs it.
3192         */
3193        spin_lock_irq(&gp->lock);
3194        gem_put_cell(gp);
            spin_unlock_irq(&gp->lock);
3195
3196        printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3197               dev->name, dev->dev_addr);
3198
3199        if (gp->phy_type == phy_mii_mdio0 ||
3200            gp->phy_type == phy_mii_mdio1)
3201                printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
3202                        gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3203
3204        /* GEM can do it all... */
3205        dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
3206        if (pci_using_dac)
3207                dev->features |= NETIF_F_HIGHDMA;
3208
3209        return 0;
3210
3211err_out_free_consistent:
3212        pci_free_consistent(pdev, sizeof(struct gem_init_block),
                                gp->init_block, gp->gblock_dvma);
            pci_set_drvdata(pdev, NULL);
3213err_out_iounmap:
3214        gem_put_cell(gp);
3215        iounmap(gp->regs);
3216
3217err_out_free_res:
3218        pci_release_regions(pdev);
3219
3220err_out_free_netdev:
3221        free_netdev(dev);
3222err_disable_device:
3223        pci_disable_device(pdev);
3224        return err;
3226}
3227
3229static struct pci_driver gem_driver = {
3230        .name           = GEM_MODULE_NAME,
3231        .id_table       = gem_pci_tbl,
3232        .probe          = gem_init_one,
3233        .remove         = gem_remove_one,
3234#ifdef CONFIG_PM
3235        .suspend        = gem_suspend,
3236        .resume         = gem_resume,
3237#endif /* CONFIG_PM */
3238};
3239
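    /* Module entry points: these only register/unregister the PCI driver;
     * per-device setup and teardown happen in gem_init_one() and
     * gem_remove_one() once the PCI core matches an ID from gem_pci_tbl.
     */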
3240static int __init gem_init(void)
3241{
3242        return pci_register_driver(&gem_driver);
3243}
3244
3245static void __exit gem_cleanup(void)
3246{
3247        pci_unregister_driver(&gem_driver);
3248}
3249
3250module_init(gem_init);
3251module_exit(gem_cleanup);
3252