linux/drivers/net/ibm_newemac/core.c
<<
>>
Prefs
   1/*
   2 * drivers/net/ibm_newemac/core.c
   3 *
   4 * Driver for PowerPC 4xx on-chip ethernet controller.
   5 *
   6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
   7 *                <benh@kernel.crashing.org>
   8 *
   9 * Based on the arch/ppc version of the driver:
  10 *
  11 * Copyright (c) 2004, 2005 Zultys Technologies.
  12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
  13 *
  14 * Based on original work by
  15 *      Matt Porter <mporter@kernel.crashing.org>
  16 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
  17 *      Armin Kuster <akuster@mvista.com>
  18 *      Johnnie Peters <jpeters@mvista.com>
  19 *
  20 * This program is free software; you can redistribute  it and/or modify it
  21 * under  the terms of  the GNU General  Public License as published by the
  22 * Free Software Foundation;  either version 2 of the  License, or (at your
  23 * option) any later version.
  24 *
  25 */
  26
  27#include <linux/module.h>
  28#include <linux/sched.h>
  29#include <linux/string.h>
  30#include <linux/errno.h>
  31#include <linux/delay.h>
  32#include <linux/types.h>
  33#include <linux/pci.h>
  34#include <linux/etherdevice.h>
  35#include <linux/skbuff.h>
  36#include <linux/crc32.h>
  37#include <linux/ethtool.h>
  38#include <linux/mii.h>
  39#include <linux/bitops.h>
  40#include <linux/workqueue.h>
  41#include <linux/of.h>
  42#include <linux/slab.h>
  43
  44#include <asm/processor.h>
  45#include <asm/io.h>
  46#include <asm/dma.h>
  47#include <asm/uaccess.h>
  48#include <asm/dcr.h>
  49#include <asm/dcr-regs.h>
  50
  51#include "core.h"
  52
  53/*
  54 * Lack of dma_unmap_???? calls is intentional.
  55 *
  56 * API-correct usage requires additional support state information to be
  57 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
  58 * EMAC design (e.g. TX buffer passed from network stack can be split into
  59 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
  60 * maintaining such information will add additional overhead.
  61 * Current DMA API implementation for 4xx processors only ensures cache coherency
  62 * and dma_unmap_???? routines are empty and are likely to stay this way.
  63 * I decided to omit dma_unmap_??? calls because I don't want to add additional
  64 * complexity just for the sake of following some abstract API, when it doesn't
   65 * add any real benefit to the driver. I understand that this decision may be
  66 * controversial, but I really tried to make code API-correct and efficient
  67 * at the same time and didn't come up with code I liked :(.                --ebs
  68 */
  69
  70#define DRV_NAME        "emac"
  71#define DRV_VERSION     "3.54"
  72#define DRV_DESC        "PPC 4xx OCP EMAC driver"
  73
  74MODULE_DESCRIPTION(DRV_DESC);
  75MODULE_AUTHOR
  76    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
  77MODULE_LICENSE("GPL");
  78
  79/*
  80 * PPC64 doesn't (yet) have a cacheable_memcpy
  81 */
  82#ifdef CONFIG_PPC64
  83#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
  84#endif
  85
  86/* minimum number of free TX descriptors required to wake up TX process */
  87#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
  88
  89/* If packet size is less than this number, we allocate small skb and copy packet
  90 * contents into it instead of just sending original big skb up
  91 */
  92#define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
  93
  94/* Since multiple EMACs share MDIO lines in various ways, we need
  95 * to avoid re-using the same PHY ID in cases where the arch didn't
  96 * setup precise phy_map entries
  97 *
  98 * XXX This is something that needs to be reworked as we can have multiple
  99 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 100 * probably require in that case to have explicit PHY IDs in the device-tree
 101 */
 102static u32 busy_phy_map;
 103static DEFINE_MUTEX(emac_phy_map_lock);
 104
 105/* This is the wait queue used to wait on any event related to probe, that
 106 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 107 */
 108static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
 109
 110/* Having stable interface names is a doomed idea. However, it would be nice
 111 * if we didn't have completely random interface names at boot too :-) It's
 112 * just a matter of making everybody's life easier. Since we are doing
 113 * threaded probing, it's a bit harder though. The base idea here is that
 114 * we make up a list of all emacs in the device-tree before we register the
 115 * driver. Every emac will then wait for the previous one in the list to
 116 * initialize before itself. We should also keep that list ordered by
 117 * cell_index.
 118 * That list is only 4 entries long, meaning that additional EMACs don't
 119 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 120 */
 121
 122#define EMAC_BOOT_LIST_SIZE     4
 123static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
 124
 125/* How long should I wait for dependent devices ? */
 126#define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
 127
 128/* I don't want to litter system log with timeout errors
 129 * when we have brain-damaged PHY.
 130 */
 131static inline void emac_report_timeout_error(struct emac_instance *dev,
 132                                             const char *error)
 133{
 134        if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
 135                                  EMAC_FTR_460EX_PHY_CLK_FIX |
 136                                  EMAC_FTR_440EP_PHY_CLK_FIX))
 137                DBG(dev, "%s" NL, error);
 138        else if (net_ratelimit())
 139                printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
 140                        error);
 141}
 142
 143/* EMAC PHY clock workaround:
 144 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
 145 * which allows controlling each EMAC clock
 146 */
/* 440EP/440GR PHY clock workaround: set the per-EMAC clock-select
 * (ECS) bit in SDR0_MFR for this cell.  No-op unless the platform
 * flagged EMAC_FTR_440EP_PHY_CLK_FIX and we have native DCR access.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
 155
/* Counterpart of emac_rx_clk_tx(): clear this cell's clock-select
 * (ECS) bit in SDR0_MFR, restoring the default clock source.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
 164
 165/* PHY polling intervals */
 166#define PHY_POLL_LINK_ON        HZ
 167#define PHY_POLL_LINK_OFF       (HZ / 5)
 168
 169/* Graceful stop timeouts in us.
 170 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 171 */
 172#define STOP_TIMEOUT_10         1230
 173#define STOP_TIMEOUT_100        124
 174#define STOP_TIMEOUT_1000       13
 175#define STOP_TIMEOUT_1000_JUMBO 73
 176
 177static unsigned char default_mcast_addr[] = {
 178        0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
 179};
 180
 181/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
 182static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
 183        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
 184        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
 185        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
 186        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
 187        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
 188        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
 189        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
 190        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
 191        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
 192        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
 193        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
 194        "tx_bd_excessive_collisions", "tx_bd_late_collision",
 195        "tx_bd_multple_collisions", "tx_bd_single_collision",
 196        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
 197        "tx_errors"
 198};
 199
 200static irqreturn_t emac_irq(int irq, void *dev_instance);
 201static void emac_clean_tx_ring(struct emac_instance *dev);
 202static void __emac_set_multicast_list(struct emac_instance *dev);
 203
 204static inline int emac_phy_supports_gige(int phy_mode)
 205{
 206        return  phy_mode == PHY_MODE_GMII ||
 207                phy_mode == PHY_MODE_RGMII ||
 208                phy_mode == PHY_MODE_SGMII ||
 209                phy_mode == PHY_MODE_TBI ||
 210                phy_mode == PHY_MODE_RTBI;
 211}
 212
 213static inline int emac_phy_gpcs(int phy_mode)
 214{
 215        return  phy_mode == PHY_MODE_SGMII ||
 216                phy_mode == PHY_MODE_TBI ||
 217                phy_mode == PHY_MODE_RTBI;
 218}
 219
/* Enable the EMAC transmitter (set MR0[TXE]) if not already enabled. */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
 231
/* Disable the EMAC transmitter and busy-wait (up to dev->stop_timeout
 * microseconds) for the graceful-stop indication MR0[TXI].  Reports a
 * timeout error if the transmitter never went idle.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* poll for TX idle, ~1us per iteration */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
 251
/* Enable the EMAC receiver (MR0[RXE]).  If a previous asynchronous
 * disable (emac_rx_disable_async) is still in flight, first wait for
 * the graceful-stop indication MR0[RXI].  Does nothing while the MAL
 * RX channel is marked stopped.
 */
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			/* NOTE(review): '=' binds last here, so r receives only
			 * the masked RXI bit, and the out_be32() below then
			 * programs MR0 from that masked value instead of the
			 * full register contents.  Looks suspicious -- verify
			 * against the EMAC register spec before changing.
			 */
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
 280
/* Disable the EMAC receiver and busy-wait (up to dev->stop_timeout
 * microseconds) for the graceful-stop indication MR0[RXI].  Reports a
 * timeout error if the receiver never went idle.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* poll for RX idle, ~1us per iteration */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
 300
/* Quiesce the network interface: block multicast list updates, disable
 * NAPI polling and stop the TX queue.  Lock order (tx lock, then addr
 * lock) mirrors emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
 312
/* Undo emac_netif_stop(): re-allow multicast updates (applying any
 * change that arrived while we were stopped), wake the TX queue and
 * re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
 332
 333static inline void emac_rx_disable_async(struct emac_instance *dev)
 334{
 335        struct emac_regs __iomem *p = dev->emacp;
 336        u32 r;
 337
 338        DBG(dev, "rx_disable_async" NL);
 339
 340        r = in_be32(&p->mr0);
 341        if (r & EMAC_MR0_RXE)
 342                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
 343}
 344
/* Soft-reset the EMAC core.
 *
 * RX/TX are stopped first (40x erratum suggests stopping RX; we stop
 * TX too) unless a previous reset already failed.  MR0[SRST] is then
 * pulsed and polled (up to 20 reads) until it self-clears.  On parts
 * with the 460EX clock erratum the clock source is temporarily
 * switched to internal around the reset.
 *
 * Returns 0 on success, -ETIMEDOUT if SRST never cleared; the outcome
 * is also recorded in dev->reset_failed.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	 /* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
 387
/* Program the EMAC group address hash table (GAHT) from the netdev's
 * multicast list.  The table is accumulated in a local shadow copy so
 * each hardware register is written exactly once.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* The CRC of the MAC address selects a hash slot, which
		 * maps to a (register, bit) pair in the table.
		 */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
 415
 416static inline u32 emac_iff2rmr(struct net_device *ndev)
 417{
 418        struct emac_instance *dev = netdev_priv(ndev);
 419        u32 r;
 420
 421        r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
 422
 423        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
 424            r |= EMAC4_RMR_BASE;
 425        else
 426            r |= EMAC_RMR_BASE;
 427
 428        if (ndev->flags & IFF_PROMISC)
 429                r |= EMAC_RMR_PME;
 430        else if (ndev->flags & IFF_ALLMULTI ||
 431                         (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
 432                r |= EMAC_RMR_PMME;
 433        else if (!netdev_mc_empty(ndev))
 434                r |= EMAC_RMR_MAE;
 435
 436        return r;
 437}
 438
 439static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
 440{
 441        u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
 442
 443        DBG2(dev, "__emac_calc_base_mr1" NL);
 444
 445        switch(tx_size) {
 446        case 2048:
 447                ret |= EMAC_MR1_TFS_2K;
 448                break;
 449        default:
 450                printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
 451                       dev->ndev->name, tx_size);
 452        }
 453
 454        switch(rx_size) {
 455        case 16384:
 456                ret |= EMAC_MR1_RFS_16K;
 457                break;
 458        case 4096:
 459                ret |= EMAC_MR1_RFS_4K;
 460                break;
 461        default:
 462                printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
 463                       dev->ndev->name, rx_size);
 464        }
 465
 466        return ret;
 467}
 468
 469static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
 470{
 471        u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
 472                EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
 473
 474        DBG2(dev, "__emac4_calc_base_mr1" NL);
 475
 476        switch(tx_size) {
 477        case 16384:
 478                ret |= EMAC4_MR1_TFS_16K;
 479                break;
 480        case 4096:
 481                ret |= EMAC4_MR1_TFS_4K;
 482                break;
 483        case 2048:
 484                ret |= EMAC4_MR1_TFS_2K;
 485                break;
 486        default:
 487                printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
 488                       dev->ndev->name, tx_size);
 489        }
 490
 491        switch(rx_size) {
 492        case 16384:
 493                ret |= EMAC4_MR1_RFS_16K;
 494                break;
 495        case 4096:
 496                ret |= EMAC4_MR1_RFS_4K;
 497                break;
 498        case 2048:
 499                ret |= EMAC4_MR1_RFS_2K;
 500                break;
 501        default:
 502                printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
 503                       dev->ndev->name, rx_size);
 504        }
 505
 506        return ret;
 507}
 508
 509static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
 510{
 511        return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
 512                __emac4_calc_base_mr1(dev, tx_size, rx_size) :
 513                __emac_calc_base_mr1(dev, tx_size, rx_size);
 514}
 515
 516static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
 517{
 518        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
 519                return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
 520        else
 521                return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
 522}
 523
 524static inline u32 emac_calc_rwmr(struct emac_instance *dev,
 525                                 unsigned int low, unsigned int high)
 526{
 527        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
 528                return (low << 22) | ( (high & 0x3ff) << 6);
 529        else
 530                return (low << 23) | ( (high & 0x1ff) << 7);
 531}
 532
/* Program the whole EMAC from current link/PHY state: MR1 (mode, FIFO
 * sizes, duplex, speed, flow control), station address, VLAN TPID,
 * receive mode, FIFO thresholds and watermarks, PAUSE timer and the
 * interrupt source enables.  When the link is up the chip is fully
 * reset first; with no link the MAC is put in internal loopback.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset timed out.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* no carrier: force full duplex + internal loopback */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* prefer the dedicated GPCS address when one is set */
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* jumbo frames: enable JPSM and allow more stop time */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
 695
 696static void emac_reinitialize(struct emac_instance *dev)
 697{
 698        DBG(dev, "reinitialize" NL);
 699
 700        emac_netif_stop(dev);
 701        if (!emac_configure(dev)) {
 702                emac_tx_enable(dev);
 703                emac_rx_enable(dev);
 704        }
 705        emac_netif_start(dev);
 706}
 707
/* Full TX-path recovery: stop the transmitter and its MAL channel,
 * drop everything queued in the TX ring, reprogram the chip and
 * restart both directions.  The order of operations matters here.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
 723
 724static void emac_reset_work(struct work_struct *work)
 725{
 726        struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
 727
 728        DBG(dev, "reset_work" NL);
 729
 730        mutex_lock(&dev->link_lock);
 731        if (dev->opened) {
 732                emac_netif_stop(dev);
 733                emac_full_tx_reset(dev);
 734                emac_netif_start(dev);
 735        }
 736        mutex_unlock(&dev->link_lock);
 737}
 738
/* net_device TX watchdog hook: defer the actual recovery to the
 * workqueue (emac_reset_work), since it needs to sleep on link_lock.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
 747
 748
 749static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
 750{
 751        int done = !!(stacr & EMAC_STACR_OC);
 752
 753        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
 754                done = !done;
 755
 756        return done;
 757};
 758
/* Perform one MDIO read transaction on this EMAC's management
 * interface.
 *
 * @id:  PHY address, @reg: PHY register number.
 *
 * Serialized by dev->mdio_lock; the ZMII/RGMII bridge is pointed at
 * our port for the duration of the transaction.
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT if the
 * interface never went idle or the read never completed, or
 * -EREMOTEIO if the PHY reported an error (STACR[PHYE]).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the MDIO port and lock in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
 829
/* Write @val to register @reg of the PHY at address @id via the EMAC
 * STA (station management / MDIO) interface.  Best-effort: a busy bus
 * or completion timeout is only reported through the debug log.
 *
 * Serialized by dev->mdio_lock.  On SoCs where the MDIO lines run
 * through a ZMII/RGMII bridge, the bridge is pointed at our port for
 * the duration of the transaction.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command: base clock divider, opcode variant per
	 * EMAC generation, then PHY register / PHY address / data fields.
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release bridge MDIO lines and the lock.  Note: 'err' is never
	 * consumed past this point since the function returns void.
	 */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
 890
 891static int emac_mdio_read(struct net_device *ndev, int id, int reg)
 892{
 893        struct emac_instance *dev = netdev_priv(ndev);
 894        int res;
 895
 896        res = __emac_mdio_read((dev->mdio_instance &&
 897                                dev->phy.gpcs_address != id) ?
 898                                dev->mdio_instance : dev,
 899                               (u8) id, (u8) reg);
 900        return res;
 901}
 902
 903static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
 904{
 905        struct emac_instance *dev = netdev_priv(ndev);
 906
 907        __emac_mdio_write((dev->mdio_instance &&
 908                           dev->phy.gpcs_address != id) ?
 909                           dev->mdio_instance : dev,
 910                          (u8) id, (u8) reg, (u16) val);
 911}
 912
/* Tx lock BH */
/* Program the RX mode register (and, when multicast accept is
 * requested, the multicast hash) while only pausing the RX channel
 * rather than doing a full EMAC reset.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
 945
 946/* Tx lock BH */
 947static void emac_set_multicast_list(struct net_device *ndev)
 948{
 949        struct emac_instance *dev = netdev_priv(ndev);
 950
 951        DBG(dev, "multicast" NL);
 952
 953        BUG_ON(!netif_running(dev->ndev));
 954
 955        if (dev->no_mcast) {
 956                dev->mcast_pending = 1;
 957                return;
 958        }
 959        __emac_set_multicast_list(dev);
 960}
 961
/* Rebuild the RX ring for @new_mtu.  RX is quiesced for the duration;
 * packets still pending in the ring are dropped and counted in
 * rx_dropped_resize.  Returns 0, or -ENOMEM when larger skbs could not
 * all be allocated (the ring is still left in a consistent state and
 * restarted).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Drop any partially assembled scatter/gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header word-aligned behind the
		 * 14-byte Ethernet header.
		 */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1035
1036/* Process ctx, rtnl_lock semaphore */
1037static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1038{
1039        struct emac_instance *dev = netdev_priv(ndev);
1040        int ret = 0;
1041
1042        if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1043                return -EINVAL;
1044
1045        DBG(dev, "change_mtu(%d)" NL, new_mtu);
1046
1047        if (netif_running(ndev)) {
1048                /* Check if we really need to reinitialize RX ring */
1049                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1050                        ret = emac_resize_rx_ring(dev, new_mtu);
1051        }
1052
1053        if (!ret) {
1054                ndev->mtu = new_mtu;
1055                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1056                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1057        }
1058
1059        return ret;
1060}
1061
1062static void emac_clean_tx_ring(struct emac_instance *dev)
1063{
1064        int i;
1065
1066        for (i = 0; i < NUM_TX_BUFF; ++i) {
1067                if (dev->tx_skb[i]) {
1068                        dev_kfree_skb(dev->tx_skb[i]);
1069                        dev->tx_skb[i] = NULL;
1070                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1071                                ++dev->estats.tx_dropped;
1072                }
1073                dev->tx_desc[i].ctrl = 0;
1074                dev->tx_desc[i].data_ptr = 0;
1075        }
1076}
1077
1078static void emac_clean_rx_ring(struct emac_instance *dev)
1079{
1080        int i;
1081
1082        for (i = 0; i < NUM_RX_BUFF; ++i)
1083                if (dev->rx_skb[i]) {
1084                        dev->rx_desc[i].ctrl = 0;
1085                        dev_kfree_skb(dev->rx_skb[i]);
1086                        dev->rx_skb[i] = NULL;
1087                        dev->rx_desc[i].data_ptr = 0;
1088                }
1089
1090        if (dev->rx_sg_skb) {
1091                dev_kfree_skb(dev->rx_sg_skb);
1092                dev->rx_sg_skb = NULL;
1093        }
1094}
1095
/* Allocate and DMA-map a fresh skb for RX ring @slot, then hand the
 * descriptor back to the MAL (EMPTY, plus WRAP on the last slot).
 * Returns 0 or -ENOMEM.
 *
 * The +2 offset on data/data_ptr keeps the IP header 4-byte aligned
 * behind the 14-byte Ethernet header.  The mapping is intentionally
 * never unmapped -- see the comment at the top of this file.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before it is marked EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1116
1117static void emac_print_link_status(struct emac_instance *dev)
1118{
1119        if (netif_carrier_ok(dev->ndev))
1120                printk(KERN_INFO "%s: link is up, %d %s%s\n",
1121                       dev->ndev->name, dev->phy.speed,
1122                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1123                       dev->phy.pause ? ", pause enabled" :
1124                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1125        else
1126                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1127}
1128
/* Process ctx, rtnl_lock semaphore */
/* net_device open hook: grab the error IRQ, populate the RX ring,
 * start PHY link polling (when a PHY is attached), then configure the
 * EMAC and bring up the MAL TX/RX channels.  Returns 0 or a -errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping before (re)starting the channels */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* Publish link_polling before the work may run */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1202
/* BHs disabled */
#if 0
/* Dead code, compiled out: decodes the speed/duplex/pause settings
 * currently programmed in MR1 and returns non-zero when they differ
 * from the cached PHY state.  Kept for reference only -- it has no
 * callers; remove or re-enable as needed.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1235
/* Delayed-work handler that polls the PHY link state.  On a down->up
 * transition it re-reads the link parameters and does a full TX reset;
 * on up->down it reinitializes the EMAC.  Reschedules itself with a
 * shorter interval while the link is down, unless the device has been
 * closed (dev->opened cleared under link_lock).
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed after this work was queued: do nothing and
	 * do not reschedule.
	 */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1276
/* Force the link state to be re-evaluated soon: drop carrier and, if
 * PHY polling is active, cancel the pending poll and reschedule it at
 * the short (link-off) interval.  The re-check of dev->link_polling
 * after the synchronous cancel guards against a concurrent close
 * clearing the flag -- we must not requeue the work in that case.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1287
/* Process ctx, rtnl_lock semaphore */
/* net_device stop hook: stop link polling, mark the device closed
 * (under link_lock so the link timer sees it), quiesce EMAC and MAL
 * channels, drain both rings and release the IRQ.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	/* Stop the hardware before freeing ring buffers */
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1319
1320static inline u16 emac_tx_csum(struct emac_instance *dev,
1321                               struct sk_buff *skb)
1322{
1323        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1324                (skb->ip_summed == CHECKSUM_PARTIAL)) {
1325                ++dev->stats.tx_packets_csum;
1326                return EMAC_TX_CTRL_TAH_CSUM;
1327        }
1328        return 0;
1329}
1330
1331static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1332{
1333        struct emac_regs __iomem *p = dev->emacp;
1334        struct net_device *ndev = dev->ndev;
1335
1336        /* Send the packet out. If the if makes a significant perf
1337         * difference, then we can store the TMR0 value in "dev"
1338         * instead
1339         */
1340        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1341                out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1342        else
1343                out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1344
1345        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1346                netif_stop_queue(ndev);
1347                DBG2(dev, "stopped TX queue" NL);
1348        }
1349
1350        ndev->trans_start = jiffies;
1351        ++dev->stats.tx_packets;
1352        dev->stats.tx_bytes += len;
1353
1354        return NETDEV_TX_OK;
1355}
1356
/* Tx lock BH */
/* Fast-path transmit for a linear skb that fits in a single MAL
 * descriptor.  Claims the next ring slot, DMA-maps the data and hands
 * the descriptor to the hardware.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	/* Claim the next slot; the last ring slot also carries WRAP */
	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* Pointer/length must be visible before READY is set in ctrl */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1385
/* Fill TX descriptors for a buffer that may exceed MAL_MAX_TX_SIZE,
 * splitting it into chunks placed in the slots AFTER @slot.  @pd is
 * the buffer's DMA address; @last says the buffer ends the frame, so
 * the final chunk gets MAL_TX_CTRL_LAST.  Returns the slot index of
 * the last chunk written.  The caller is responsible for ring-space
 * accounting and for setting READY on the frame's first descriptor
 * (after a wmb) so the hardware does not see these before the frame
 * is complete.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached here; the caller pins the skb on the
		 * frame's final slot only.
		 */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1414
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather transmit: maps the linear part and each page
 * fragment into (possibly several) MAL descriptors, then arms the
 * frame by setting READY on its first descriptor last.  Falls back to
 * emac_start_xmit() for small linear skbs, and unwinds everything if
 * the ring turns out to be too full (NETDEV_TX_BUSY).
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* Note: no MAL_TX_CTRL_LAST here -- emac_xmit_split() adds it
	 * on the frame's final chunk.
	 */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All chunk descriptors must be in memory before the first
	 * descriptor's READY bit makes the frame visible to the MAL.
	 */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1498
1499/* Tx lock BHs */
1500static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1501{
1502        struct emac_error_stats *st = &dev->estats;
1503
1504        DBG(dev, "BD TX error %04x" NL, ctrl);
1505
1506        ++st->tx_bd_errors;
1507        if (ctrl & EMAC_TX_ST_BFCS)
1508                ++st->tx_bd_bad_fcs;
1509        if (ctrl & EMAC_TX_ST_LCS)
1510                ++st->tx_bd_carrier_loss;
1511        if (ctrl & EMAC_TX_ST_ED)
1512                ++st->tx_bd_excessive_deferral;
1513        if (ctrl & EMAC_TX_ST_EC)
1514                ++st->tx_bd_excessive_collisions;
1515        if (ctrl & EMAC_TX_ST_LC)
1516                ++st->tx_bd_late_collision;
1517        if (ctrl & EMAC_TX_ST_MC)
1518                ++st->tx_bd_multple_collisions;
1519        if (ctrl & EMAC_TX_ST_SC)
1520                ++st->tx_bd_single_collision;
1521        if (ctrl & EMAC_TX_ST_UR)
1522                ++st->tx_bd_underrun;
1523        if (ctrl & EMAC_TX_ST_SQE)
1524                ++st->tx_bd_sqe;
1525}
1526
/* MAL TX-complete callback: reap finished descriptors starting at
 * ack_slot, free their skbs, account BD errors, and wake the queue
 * once enough ring space is back.  Runs under the netif tx lock with
 * BHs disabled.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Slots filled by emac_xmit_split() carry no skb;
			 * only the frame's last slot holds one.
			 */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			/* Keep reaping until a READY descriptor or an
			 * empty ring stops us.
			 */
			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1572
/* Hand the skb in RX @slot back to the hardware without reallocating:
 * re-map the portion the chip wrote (when @len != 0) and mark the
 * descriptor EMPTY again (with WRAP on the last slot).
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	/* The mapping result is deliberately discarded: the descriptor
	 * already holds the buffer's DMA address (see the no-unmap note
	 * at the top of this file); this call only re-syncs the data.
	 */
	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Descriptor fields must be visible before EMPTY is set */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1589
1590static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1591{
1592        struct emac_error_stats *st = &dev->estats;
1593
1594        DBG(dev, "BD RX error %04x" NL, ctrl);
1595
1596        ++st->rx_bd_errors;
1597        if (ctrl & EMAC_RX_ST_OE)
1598                ++st->rx_bd_overrun;
1599        if (ctrl & EMAC_RX_ST_BP)
1600                ++st->rx_bd_bad_packet;
1601        if (ctrl & EMAC_RX_ST_RP)
1602                ++st->rx_bd_runt_packet;
1603        if (ctrl & EMAC_RX_ST_SE)
1604                ++st->rx_bd_short_event;
1605        if (ctrl & EMAC_RX_ST_AE)
1606                ++st->rx_bd_alignment_error;
1607        if (ctrl & EMAC_RX_ST_BFCS)
1608                ++st->rx_bd_bad_fcs;
1609        if (ctrl & EMAC_RX_ST_PTL)
1610                ++st->rx_bd_packet_too_long;
1611        if (ctrl & EMAC_RX_ST_ORE)
1612                ++st->rx_bd_out_of_range;
1613        if (ctrl & EMAC_RX_ST_IRE)
1614                ++st->rx_bd_in_range;
1615}
1616
1617static inline void emac_rx_csum(struct emac_instance *dev,
1618                                struct sk_buff *skb, u16 ctrl)
1619{
1620#ifdef CONFIG_IBM_NEW_EMAC_TAH
1621        if (!ctrl && dev->tah_dev) {
1622                skb->ip_summed = CHECKSUM_UNNECESSARY;
1623                ++dev->stats.rx_packets_csum;
1624        }
1625#endif
1626}
1627
/* Append the data received in RX @slot to the scatter/gather packet
 * being assembled in dev->rx_sg_skb.  Returns 0 on success; -1 when no
 * SG packet is in progress or the packet would exceed rx_skb_size (the
 * partial packet is then dropped and counted in rx_dropped_mtu).  The
 * slot is recycled back to the hardware in every case.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1649
/* NAPI poll context */
/*
 * Process up to @budget completed RX descriptors starting at dev->rx_slot.
 *
 * Single-descriptor frames are either copied into a freshly allocated skb
 * (short frames below EMAC_RX_COPY_THRESH) or handed to the stack directly
 * with a replacement skb attached to the ring slot.  Multi-descriptor
 * (scatter/gather) frames are accumulated into dev->rx_sg_skb via the sg:
 * path.  Returns the number of descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Descriptor still owned by the MAL: nothing more to reap */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Order the ctrl read above against the data_len read below */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		/* TAH checksum failures are handled by emac_rx_csum();
		 * any other error bit drops the frame */
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Runt frame, not even a full Ethernet header */
		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small frame: copy it out and recycle the ring skb */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			/* Include the 2-byte alignment pad in the copy */
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter/gather frame: first fragment starts a new
		 * rx_sg_skb, following fragments are appended until the
		 * LAST descriptor completes the packet */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* Channel was stopped (e.g. by emac_rxde) and we still have budget:
	 * drain anything that arrived meanwhile, drop a half-assembled SG
	 * packet, then restart the RX channel from slot 0 */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1780
1781/* NAPI poll context */
1782static int emac_peek_rx(void *param)
1783{
1784        struct emac_instance *dev = param;
1785
1786        return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1787}
1788
1789/* NAPI poll context */
1790static int emac_peek_rx_sg(void *param)
1791{
1792        struct emac_instance *dev = param;
1793
1794        int slot = dev->rx_slot;
1795        while (1) {
1796                u16 ctrl = dev->rx_desc[slot].ctrl;
1797                if (ctrl & MAL_RX_CTRL_EMPTY)
1798                        return 0;
1799                else if (ctrl & MAL_RX_CTRL_LAST)
1800                        return 1;
1801
1802                slot = (slot + 1) % NUM_RX_BUFF;
1803
1804                /* I'm just being paranoid here :) */
1805                if (unlikely(slot == dev->rx_slot))
1806                        return 0;
1807        }
1808}
1809
1810/* Hard IRQ */
1811static void emac_rxde(void *param)
1812{
1813        struct emac_instance *dev = param;
1814
1815        ++dev->estats.rx_stopped;
1816        emac_rx_disable_async(dev);
1817}
1818
/* Hard IRQ */
/*
 * EMAC error/status interrupt handler: read and acknowledge the ISR,
 * then bump the matching error counter for every status bit that is set.
 * This handler only does accounting; actual recovery happens elsewhere.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Read the status and ack it by writing the same bits back */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	/* EMAC4-specific status bits */
	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	/* Common status bits */
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1869
1870static struct net_device_stats *emac_stats(struct net_device *ndev)
1871{
1872        struct emac_instance *dev = netdev_priv(ndev);
1873        struct emac_stats *st = &dev->stats;
1874        struct emac_error_stats *est = &dev->estats;
1875        struct net_device_stats *nst = &dev->nstats;
1876        unsigned long flags;
1877
1878        DBG2(dev, "stats" NL);
1879
1880        /* Compute "legacy" statistics */
1881        spin_lock_irqsave(&dev->lock, flags);
1882        nst->rx_packets = (unsigned long)st->rx_packets;
1883        nst->rx_bytes = (unsigned long)st->rx_bytes;
1884        nst->tx_packets = (unsigned long)st->tx_packets;
1885        nst->tx_bytes = (unsigned long)st->tx_bytes;
1886        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1887                                          est->rx_dropped_error +
1888                                          est->rx_dropped_resize +
1889                                          est->rx_dropped_mtu);
1890        nst->tx_dropped = (unsigned long)est->tx_dropped;
1891
1892        nst->rx_errors = (unsigned long)est->rx_bd_errors;
1893        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1894                                              est->rx_fifo_overrun +
1895                                              est->rx_overrun);
1896        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1897                                               est->rx_alignment_error);
1898        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1899                                             est->rx_bad_fcs);
1900        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1901                                                est->rx_bd_short_event +
1902                                                est->rx_bd_packet_too_long +
1903                                                est->rx_bd_out_of_range +
1904                                                est->rx_bd_in_range +
1905                                                est->rx_runt_packet +
1906                                                est->rx_short_event +
1907                                                est->rx_packet_too_long +
1908                                                est->rx_out_of_range +
1909                                                est->rx_in_range);
1910
1911        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1912        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1913                                              est->tx_underrun);
1914        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1915        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1916                                          est->tx_bd_excessive_collisions +
1917                                          est->tx_bd_late_collision +
1918                                          est->tx_bd_multple_collisions);
1919        spin_unlock_irqrestore(&dev->lock, flags);
1920        return nst;
1921}
1922
1923static struct mal_commac_ops emac_commac_ops = {
1924        .poll_tx = &emac_poll_tx,
1925        .poll_rx = &emac_poll_rx,
1926        .peek_rx = &emac_peek_rx,
1927        .rxde = &emac_rxde,
1928};
1929
1930static struct mal_commac_ops emac_commac_sg_ops = {
1931        .poll_tx = &emac_poll_tx,
1932        .poll_rx = &emac_poll_rx,
1933        .peek_rx = &emac_peek_rx_sg,
1934        .rxde = &emac_rxde,
1935};
1936
1937/* Ethtool support */
1938static int emac_ethtool_get_settings(struct net_device *ndev,
1939                                     struct ethtool_cmd *cmd)
1940{
1941        struct emac_instance *dev = netdev_priv(ndev);
1942
1943        cmd->supported = dev->phy.features;
1944        cmd->port = PORT_MII;
1945        cmd->phy_address = dev->phy.address;
1946        cmd->transceiver =
1947            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1948
1949        mutex_lock(&dev->link_lock);
1950        cmd->advertising = dev->phy.advertising;
1951        cmd->autoneg = dev->phy.autoneg;
1952        cmd->speed = dev->phy.speed;
1953        cmd->duplex = dev->phy.duplex;
1954        mutex_unlock(&dev->link_lock);
1955
1956        return 0;
1957}
1958
1959static int emac_ethtool_set_settings(struct net_device *ndev,
1960                                     struct ethtool_cmd *cmd)
1961{
1962        struct emac_instance *dev = netdev_priv(ndev);
1963        u32 f = dev->phy.features;
1964
1965        DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1966            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1967
1968        /* Basic sanity checks */
1969        if (dev->phy.address < 0)
1970                return -EOPNOTSUPP;
1971        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1972                return -EINVAL;
1973        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1974                return -EINVAL;
1975        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1976                return -EINVAL;
1977
1978        if (cmd->autoneg == AUTONEG_DISABLE) {
1979                switch (cmd->speed) {
1980                case SPEED_10:
1981                        if (cmd->duplex == DUPLEX_HALF &&
1982                            !(f & SUPPORTED_10baseT_Half))
1983                                return -EINVAL;
1984                        if (cmd->duplex == DUPLEX_FULL &&
1985                            !(f & SUPPORTED_10baseT_Full))
1986                                return -EINVAL;
1987                        break;
1988                case SPEED_100:
1989                        if (cmd->duplex == DUPLEX_HALF &&
1990                            !(f & SUPPORTED_100baseT_Half))
1991                                return -EINVAL;
1992                        if (cmd->duplex == DUPLEX_FULL &&
1993                            !(f & SUPPORTED_100baseT_Full))
1994                                return -EINVAL;
1995                        break;
1996                case SPEED_1000:
1997                        if (cmd->duplex == DUPLEX_HALF &&
1998                            !(f & SUPPORTED_1000baseT_Half))
1999                                return -EINVAL;
2000                        if (cmd->duplex == DUPLEX_FULL &&
2001                            !(f & SUPPORTED_1000baseT_Full))
2002                                return -EINVAL;
2003                        break;
2004                default:
2005                        return -EINVAL;
2006                }
2007
2008                mutex_lock(&dev->link_lock);
2009                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2010                                                cmd->duplex);
2011                mutex_unlock(&dev->link_lock);
2012
2013        } else {
2014                if (!(f & SUPPORTED_Autoneg))
2015                        return -EINVAL;
2016
2017                mutex_lock(&dev->link_lock);
2018                dev->phy.def->ops->setup_aneg(&dev->phy,
2019                                              (cmd->advertising & f) |
2020                                              (dev->phy.advertising &
2021                                               (ADVERTISED_Pause |
2022                                                ADVERTISED_Asym_Pause)));
2023                mutex_unlock(&dev->link_lock);
2024        }
2025        emac_force_link_update(dev);
2026
2027        return 0;
2028}
2029
2030static void emac_ethtool_get_ringparam(struct net_device *ndev,
2031                                       struct ethtool_ringparam *rp)
2032{
2033        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2034        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2035}
2036
2037static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2038                                        struct ethtool_pauseparam *pp)
2039{
2040        struct emac_instance *dev = netdev_priv(ndev);
2041
2042        mutex_lock(&dev->link_lock);
2043        if ((dev->phy.features & SUPPORTED_Autoneg) &&
2044            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2045                pp->autoneg = 1;
2046
2047        if (dev->phy.duplex == DUPLEX_FULL) {
2048                if (dev->phy.pause)
2049                        pp->rx_pause = pp->tx_pause = 1;
2050                else if (dev->phy.asym_pause)
2051                        pp->tx_pause = 1;
2052        }
2053        mutex_unlock(&dev->link_lock);
2054}
2055
2056static int emac_get_regs_len(struct emac_instance *dev)
2057{
2058        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2059                return sizeof(struct emac_ethtool_regs_subhdr) +
2060                        EMAC4_ETHTOOL_REGS_SIZE(dev);
2061        else
2062                return sizeof(struct emac_ethtool_regs_subhdr) +
2063                        EMAC_ETHTOOL_REGS_SIZE(dev);
2064}
2065
2066static int emac_ethtool_get_regs_len(struct net_device *ndev)
2067{
2068        struct emac_instance *dev = netdev_priv(ndev);
2069        int size;
2070
2071        size = sizeof(struct emac_ethtool_regs_hdr) +
2072                emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2073        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2074                size += zmii_get_regs_len(dev->zmii_dev);
2075        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2076                size += rgmii_get_regs_len(dev->rgmii_dev);
2077        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2078                size += tah_get_regs_len(dev->tah_dev);
2079
2080        return size;
2081}
2082
2083static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2084{
2085        struct emac_ethtool_regs_subhdr *hdr = buf;
2086
2087        hdr->index = dev->cell_index;
2088        if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2089                hdr->version = EMAC4_ETHTOOL_REGS_VER;
2090                memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2091                return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2092        } else {
2093                hdr->version = EMAC_ETHTOOL_REGS_VER;
2094                memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2095                return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2096        }
2097}
2098
2099static void emac_ethtool_get_regs(struct net_device *ndev,
2100                                  struct ethtool_regs *regs, void *buf)
2101{
2102        struct emac_instance *dev = netdev_priv(ndev);
2103        struct emac_ethtool_regs_hdr *hdr = buf;
2104
2105        hdr->components = 0;
2106        buf = hdr + 1;
2107
2108        buf = mal_dump_regs(dev->mal, buf);
2109        buf = emac_dump_regs(dev, buf);
2110        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2111                hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2112                buf = zmii_dump_regs(dev->zmii_dev, buf);
2113        }
2114        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2115                hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2116                buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2117        }
2118        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2119                hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2120                buf = tah_dump_regs(dev->tah_dev, buf);
2121        }
2122}
2123
2124static int emac_ethtool_nway_reset(struct net_device *ndev)
2125{
2126        struct emac_instance *dev = netdev_priv(ndev);
2127        int res = 0;
2128
2129        DBG(dev, "nway_reset" NL);
2130
2131        if (dev->phy.address < 0)
2132                return -EOPNOTSUPP;
2133
2134        mutex_lock(&dev->link_lock);
2135        if (!dev->phy.autoneg) {
2136                res = -EINVAL;
2137                goto out;
2138        }
2139
2140        dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2141 out:
2142        mutex_unlock(&dev->link_lock);
2143        emac_force_link_update(dev);
2144        return res;
2145}
2146
2147static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2148{
2149        if (stringset == ETH_SS_STATS)
2150                return EMAC_ETHTOOL_STATS_COUNT;
2151        else
2152                return -EINVAL;
2153}
2154
2155static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2156                                     u8 * buf)
2157{
2158        if (stringset == ETH_SS_STATS)
2159                memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2160}
2161
2162static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2163                                           struct ethtool_stats *estats,
2164                                           u64 * tmp_stats)
2165{
2166        struct emac_instance *dev = netdev_priv(ndev);
2167
2168        memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2169        tmp_stats += sizeof(dev->stats) / sizeof(u64);
2170        memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2171}
2172
2173static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2174                                     struct ethtool_drvinfo *info)
2175{
2176        struct emac_instance *dev = netdev_priv(ndev);
2177
2178        strcpy(info->driver, "ibm_emac");
2179        strcpy(info->version, DRV_VERSION);
2180        info->fw_version[0] = '\0';
2181        sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2182                dev->cell_index, dev->ofdev->dev.of_node->full_name);
2183        info->regdump_len = emac_ethtool_get_regs_len(ndev);
2184}
2185
2186static const struct ethtool_ops emac_ethtool_ops = {
2187        .get_settings = emac_ethtool_get_settings,
2188        .set_settings = emac_ethtool_set_settings,
2189        .get_drvinfo = emac_ethtool_get_drvinfo,
2190
2191        .get_regs_len = emac_ethtool_get_regs_len,
2192        .get_regs = emac_ethtool_get_regs,
2193
2194        .nway_reset = emac_ethtool_nway_reset,
2195
2196        .get_ringparam = emac_ethtool_get_ringparam,
2197        .get_pauseparam = emac_ethtool_get_pauseparam,
2198
2199        .get_strings = emac_ethtool_get_strings,
2200        .get_sset_count = emac_ethtool_get_sset_count,
2201        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2202
2203        .get_link = ethtool_op_get_link,
2204};
2205
2206static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2207{
2208        struct emac_instance *dev = netdev_priv(ndev);
2209        struct mii_ioctl_data *data = if_mii(rq);
2210
2211        DBG(dev, "ioctl %08x" NL, cmd);
2212
2213        if (dev->phy.address < 0)
2214                return -EOPNOTSUPP;
2215
2216        switch (cmd) {
2217        case SIOCGMIIPHY:
2218                data->phy_id = dev->phy.address;
2219                /* Fall through */
2220        case SIOCGMIIREG:
2221                data->val_out = emac_mdio_read(ndev, dev->phy.address,
2222                                               data->reg_num);
2223                return 0;
2224
2225        case SIOCSMIIREG:
2226                emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2227                                data->val_in);
2228                return 0;
2229        default:
2230                return -EOPNOTSUPP;
2231        }
2232}
2233
/* Tracks one probe-time dependency of an EMAC instance (MAL, ZMII,
 * RGMII, TAH, MDIO or the previous EMAC in the boot list): the
 * device-tree phandle naming it plus the resolution state cached by
 * emac_check_deps(). */
struct emac_depentry {
	u32			phandle;	/* phandle from the EMAC node; 0 = no dependency */
	struct device_node	*node;		/* resolved device-tree node */
	struct platform_device	*ofdev;		/* matching platform device */
	void			*drvdata;	/* companion driver data; non-NULL once bound */
};
2240
/* Indices into the emac_depentry array built by emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5	/* previous EMAC in the boot list */
#define EMAC_DEP_COUNT          6
2248
/*
 * Check whether every companion device this EMAC depends on has probed
 * and bound.  Resolution progress (node, platform device, drvdata) is
 * cached in @deps so repeated calls resume where the last one stopped.
 * Returns non-zero once all EMAC_DEP_COUNT dependencies are satisfied.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC finished probing meanwhile */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		/* Resolve phandle -> node -> platform device -> drvdata,
		 * step by step; bail out of this entry at the first step
		 * that cannot be completed yet */
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		/* non-NULL drvdata means the companion driver has bound */
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2287
2288static void emac_put_deps(struct emac_instance *dev)
2289{
2290        if (dev->mal_dev)
2291                of_dev_put(dev->mal_dev);
2292        if (dev->zmii_dev)
2293                of_dev_put(dev->zmii_dev);
2294        if (dev->rgmii_dev)
2295                of_dev_put(dev->rgmii_dev);
2296        if (dev->mdio_dev)
2297                of_dev_put(dev->mdio_dev);
2298        if (dev->tah_dev)
2299                of_dev_put(dev->tah_dev);
2300}
2301
2302static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2303                                        unsigned long action, void *data)
2304{
2305        /* We are only intereted in device addition */
2306        if (action == BUS_NOTIFY_BOUND_DRIVER)
2307                wake_up_all(&emac_probe_wait);
2308        return 0;
2309}
2310
/* Registered around the dependency wait in emac_wait_deps() so each
 * driver bind re-triggers emac_check_deps(). */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2314
/*
 * Wait (with timeout) until all companion devices of this EMAC have
 * probed, re-checking on every platform-bus driver bind.  On success
 * the resolved platform devices are stored in @dev with references
 * held (released later via emac_put_deps()); on failure all references
 * taken so far are dropped.  Returns 0 or -ENODEV on timeout.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Any EMAC except the first also waits for the previous one in
	 * the boot list; the marker is replaced by the real node inside
	 * emac_check_deps() */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	/* Final re-check decides success: the wait may have timed out */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		/* Transfer the held device references into @dev */
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The previous-EMAC entry is only needed for probe ordering */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2354
2355static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2356                                         u32 *val, int fatal)
2357{
2358        int len;
2359        const u32 *prop = of_get_property(np, name, &len);
2360        if (prop == NULL || len < sizeof(u32)) {
2361                if (fatal)
2362                        printk(KERN_ERR "%s: missing %s property\n",
2363                               np->full_name, name);
2364                return -ENODEV;
2365        }
2366        *val = *prop;
2367        return 0;
2368}
2369
/*
 * Probe and initialize the PHY attached to this EMAC, or set up a
 * PHY-less configuration when neither phy-address nor phy-map was
 * given in the device tree.  Scans the MDIO addresses allowed by
 * phy_map/busy_phy_map, probes the first PHY that responds, then
 * programs initial autoneg or forced link parameters.
 * Returns 0 or a negative errno.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to the dev tree
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is shared by all EMAC instances; serialize access */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX*/
	/* NOTE(review): this repeats the dcri_clrset() above; presumably
	 * re-asserting the setting after emac_rx_clk_tx() is deliberate —
	 * confirm before simplifying */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit phy-address overrides the scan mask: probe only
	 * that single address */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	/* Scan all 32 MDIO addresses whose phy_map bit is clear */
	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	/* Loop ran to completion: no PHY answered the probe */
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2504
2505static int __devinit emac_init_config(struct emac_instance *dev)
2506{
2507        struct device_node *np = dev->ofdev->dev.of_node;
2508        const void *p;
2509        unsigned int plen;
2510        const char *pm, *phy_modes[] = {
2511                [PHY_MODE_NA] = "",
2512                [PHY_MODE_MII] = "mii",
2513                [PHY_MODE_RMII] = "rmii",
2514                [PHY_MODE_SMII] = "smii",
2515                [PHY_MODE_RGMII] = "rgmii",
2516                [PHY_MODE_TBI] = "tbi",
2517                [PHY_MODE_GMII] = "gmii",
2518                [PHY_MODE_RTBI] = "rtbi",
2519                [PHY_MODE_SGMII] = "sgmii",
2520        };
2521
2522        /* Read config from device-tree */
2523        if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2524                return -ENXIO;
2525        if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2526                return -ENXIO;
2527        if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2528                return -ENXIO;
2529        if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2530                return -ENXIO;
2531        if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2532                dev->max_mtu = 1500;
2533        if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2534                dev->rx_fifo_size = 2048;
2535        if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2536                dev->tx_fifo_size = 2048;
2537        if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2538                dev->rx_fifo_size_gige = dev->rx_fifo_size;
2539        if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2540                dev->tx_fifo_size_gige = dev->tx_fifo_size;
2541        if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2542                dev->phy_address = 0xffffffff;
2543        if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2544                dev->phy_map = 0xffffffff;
2545        if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2546                dev->gpcs_address = 0xffffffff;
2547        if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2548                return -ENXIO;
2549        if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2550                dev->tah_ph = 0;
2551        if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2552                dev->tah_port = 0;
2553        if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2554                dev->mdio_ph = 0;
2555        if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2556                dev->zmii_ph = 0;
2557        if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2558                dev->zmii_port = 0xffffffff;
2559        if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2560                dev->rgmii_ph = 0;
2561        if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2562                dev->rgmii_port = 0xffffffff;
2563        if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2564                dev->fifo_entry_size = 16;
2565        if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2566                dev->mal_burst_size = 256;
2567
2568        /* PHY mode needs some decoding */
2569        dev->phy_mode = PHY_MODE_NA;
2570        pm = of_get_property(np, "phy-mode", &plen);
2571        if (pm != NULL) {
2572                int i;
2573                for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2574                        if (!strcasecmp(pm, phy_modes[i])) {
2575                                dev->phy_mode = i;
2576                                break;
2577                        }
2578        }
2579
2580        /* Backward compat with non-final DT */
2581        if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2582                u32 nmode = *(const u32 *)pm;
2583                if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2584                        dev->phy_mode = nmode;
2585        }
2586
2587        /* Check EMAC version */
2588        if (of_device_is_compatible(np, "ibm,emac4sync")) {
2589                dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2590                if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2591                    of_device_is_compatible(np, "ibm,emac-460gt"))
2592                        dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2593                if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2594                    of_device_is_compatible(np, "ibm,emac-405exr"))
2595                        dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2596        } else if (of_device_is_compatible(np, "ibm,emac4")) {
2597                dev->features |= EMAC_FTR_EMAC4;
2598                if (of_device_is_compatible(np, "ibm,emac-440gx"))
2599                        dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2600        } else {
2601                if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2602                    of_device_is_compatible(np, "ibm,emac-440gr"))
2603                        dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2604                if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2605#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2606                        dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2607#else
2608                        printk(KERN_ERR "%s: Flow control not disabled!\n",
2609                                        np->full_name);
2610                        return -ENXIO;
2611#endif
2612                }
2613
2614        }
2615
2616        /* Fixup some feature bits based on the device tree */
2617        if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2618                dev->features |= EMAC_FTR_STACR_OC_INVERT;
2619        if (of_get_property(np, "has-new-stacr-staopc", NULL))
2620                dev->features |= EMAC_FTR_HAS_NEW_STACR;
2621
2622        /* CAB lacks the appropriate properties */
2623        if (of_device_is_compatible(np, "ibm,emac-axon"))
2624                dev->features |= EMAC_FTR_HAS_NEW_STACR |
2625                        EMAC_FTR_STACR_OC_INVERT;
2626
2627        /* Enable TAH/ZMII/RGMII features as found */
2628        if (dev->tah_ph != 0) {
2629#ifdef CONFIG_IBM_NEW_EMAC_TAH
2630                dev->features |= EMAC_FTR_HAS_TAH;
2631#else
2632                printk(KERN_ERR "%s: TAH support not enabled !\n",
2633                       np->full_name);
2634                return -ENXIO;
2635#endif
2636        }
2637
2638        if (dev->zmii_ph != 0) {
2639#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2640                dev->features |= EMAC_FTR_HAS_ZMII;
2641#else
2642                printk(KERN_ERR "%s: ZMII support not enabled !\n",
2643                       np->full_name);
2644                return -ENXIO;
2645#endif
2646        }
2647
2648        if (dev->rgmii_ph != 0) {
2649#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2650                dev->features |= EMAC_FTR_HAS_RGMII;
2651#else
2652                printk(KERN_ERR "%s: RGMII support not enabled !\n",
2653                       np->full_name);
2654                return -ENXIO;
2655#endif
2656        }
2657
2658        /* Read MAC-address */
2659        p = of_get_property(np, "local-mac-address", NULL);
2660        if (p == NULL) {
2661                printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2662                       np->full_name);
2663                return -ENXIO;
2664        }
2665        memcpy(dev->ndev->dev_addr, p, 6);
2666
2667        /* IAHT and GAHT filter parameterization */
2668        if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2669                dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2670                dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2671        } else {
2672                dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2673                dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2674        }
2675
2676        DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2677        DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2678        DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2679        DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2680        DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2681
2682        return 0;
2683}
2684
/* net_device callbacks for non-gigabit EMACs: plain (non-SG) transmit
 * path and the stock Ethernet MTU handler. */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_close,
	.ndo_get_stats          = emac_stats,
	.ndo_set_multicast_list = emac_set_multicast_list,
	.ndo_do_ioctl           = emac_ioctl,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = eth_mac_addr,
	.ndo_start_xmit         = emac_start_xmit,
	.ndo_change_mtu         = eth_change_mtu,
};
2697
/* net_device callbacks for gigabit-capable EMACs: differs from
 * emac_netdev_ops only in the scatter/gather transmit entry point and
 * the driver-specific MTU change handler (jumbo frame support). */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open               = emac_open,
	.ndo_stop               = emac_close,
	.ndo_get_stats          = emac_stats,
	.ndo_set_multicast_list = emac_set_multicast_list,
	.ndo_do_ioctl           = emac_ioctl,
	.ndo_tx_timeout         = emac_tx_timeout,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = eth_mac_addr,
	.ndo_start_xmit         = emac_start_xmit_sg,
	.ndo_change_mtu         = emac_change_mtu,
};
2710
2711static int __devinit emac_probe(struct platform_device *ofdev)
2712{
2713        struct net_device *ndev;
2714        struct emac_instance *dev;
2715        struct device_node *np = ofdev->dev.of_node;
2716        struct device_node **blist = NULL;
2717        int err, i;
2718
2719        /* Skip unused/unwired EMACS.  We leave the check for an unused
2720         * property here for now, but new flat device trees should set a
2721         * status property to "disabled" instead.
2722         */
2723        if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2724                return -ENODEV;
2725
2726        /* Find ourselves in the bootlist if we are there */
2727        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2728                if (emac_boot_list[i] == np)
2729                        blist = &emac_boot_list[i];
2730
2731        /* Allocate our net_device structure */
2732        err = -ENOMEM;
2733        ndev = alloc_etherdev(sizeof(struct emac_instance));
2734        if (!ndev) {
2735                printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2736                       np->full_name);
2737                goto err_gone;
2738        }
2739        dev = netdev_priv(ndev);
2740        dev->ndev = ndev;
2741        dev->ofdev = ofdev;
2742        dev->blist = blist;
2743        SET_NETDEV_DEV(ndev, &ofdev->dev);
2744
2745        /* Initialize some embedded data structures */
2746        mutex_init(&dev->mdio_lock);
2747        mutex_init(&dev->link_lock);
2748        spin_lock_init(&dev->lock);
2749        INIT_WORK(&dev->reset_work, emac_reset_work);
2750
2751        /* Init various config data based on device-tree */
2752        err = emac_init_config(dev);
2753        if (err != 0)
2754                goto err_free;
2755
2756        /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2757        dev->emac_irq = irq_of_parse_and_map(np, 0);
2758        dev->wol_irq = irq_of_parse_and_map(np, 1);
2759        if (dev->emac_irq == NO_IRQ) {
2760                printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2761                goto err_free;
2762        }
2763        ndev->irq = dev->emac_irq;
2764
2765        /* Map EMAC regs */
2766        if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2767                printk(KERN_ERR "%s: Can't get registers address\n",
2768                       np->full_name);
2769                goto err_irq_unmap;
2770        }
2771        // TODO : request_mem_region
2772        dev->emacp = ioremap(dev->rsrc_regs.start,
2773                             dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2774        if (dev->emacp == NULL) {
2775                printk(KERN_ERR "%s: Can't map device registers!\n",
2776                       np->full_name);
2777                err = -ENOMEM;
2778                goto err_irq_unmap;
2779        }
2780
2781        /* Wait for dependent devices */
2782        err = emac_wait_deps(dev);
2783        if (err) {
2784                printk(KERN_ERR
2785                       "%s: Timeout waiting for dependent devices\n",
2786                       np->full_name);
2787                /*  display more info about what's missing ? */
2788                goto err_reg_unmap;
2789        }
2790        dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2791        if (dev->mdio_dev != NULL)
2792                dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2793
2794        /* Register with MAL */
2795        dev->commac.ops = &emac_commac_ops;
2796        dev->commac.dev = dev;
2797        dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2798        dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2799        err = mal_register_commac(dev->mal, &dev->commac);
2800        if (err) {
2801                printk(KERN_ERR "%s: failed to register with mal %s!\n",
2802                       np->full_name, dev->mal_dev->dev.of_node->full_name);
2803                goto err_rel_deps;
2804        }
2805        dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2806        dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2807
2808        /* Get pointers to BD rings */
2809        dev->tx_desc =
2810            dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2811        dev->rx_desc =
2812            dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2813
2814        DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2815        DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2816
2817        /* Clean rings */
2818        memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2819        memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2820        memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2821        memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2822
2823        /* Attach to ZMII, if needed */
2824        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2825            (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2826                goto err_unreg_commac;
2827
2828        /* Attach to RGMII, if needed */
2829        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2830            (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2831                goto err_detach_zmii;
2832
2833        /* Attach to TAH, if needed */
2834        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2835            (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2836                goto err_detach_rgmii;
2837
2838        /* Set some link defaults before we can find out real parameters */
2839        dev->phy.speed = SPEED_100;
2840        dev->phy.duplex = DUPLEX_FULL;
2841        dev->phy.autoneg = AUTONEG_DISABLE;
2842        dev->phy.pause = dev->phy.asym_pause = 0;
2843        dev->stop_timeout = STOP_TIMEOUT_100;
2844        INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2845
2846        /* Find PHY if any */
2847        err = emac_init_phy(dev);
2848        if (err != 0)
2849                goto err_detach_tah;
2850
2851        if (dev->tah_dev) {
2852                ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2853                ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2854        }
2855        ndev->watchdog_timeo = 5 * HZ;
2856        if (emac_phy_supports_gige(dev->phy_mode)) {
2857                ndev->netdev_ops = &emac_gige_netdev_ops;
2858                dev->commac.ops = &emac_commac_sg_ops;
2859        } else
2860                ndev->netdev_ops = &emac_netdev_ops;
2861        SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2862
2863        netif_carrier_off(ndev);
2864
2865        err = register_netdev(ndev);
2866        if (err) {
2867                printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2868                       np->full_name, err);
2869                goto err_detach_tah;
2870        }
2871
2872        /* Set our drvdata last as we don't want them visible until we are
2873         * fully initialized
2874         */
2875        wmb();
2876        dev_set_drvdata(&ofdev->dev, dev);
2877
2878        /* There's a new kid in town ! Let's tell everybody */
2879        wake_up_all(&emac_probe_wait);
2880
2881
2882        printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2883               ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2884
2885        if (dev->phy_mode == PHY_MODE_SGMII)
2886                printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2887
2888        if (dev->phy.address >= 0)
2889                printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2890                       dev->phy.def->name, dev->phy.address);
2891
2892        emac_dbg_register(dev);
2893
2894        /* Life is good */
2895        return 0;
2896
2897        /* I have a bad feeling about this ... */
2898
2899 err_detach_tah:
2900        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2901                tah_detach(dev->tah_dev, dev->tah_port);
2902 err_detach_rgmii:
2903        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2904                rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2905 err_detach_zmii:
2906        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2907                zmii_detach(dev->zmii_dev, dev->zmii_port);
2908 err_unreg_commac:
2909        mal_unregister_commac(dev->mal, &dev->commac);
2910 err_rel_deps:
2911        emac_put_deps(dev);
2912 err_reg_unmap:
2913        iounmap(dev->emacp);
2914 err_irq_unmap:
2915        if (dev->wol_irq != NO_IRQ)
2916                irq_dispose_mapping(dev->wol_irq);
2917        if (dev->emac_irq != NO_IRQ)
2918                irq_dispose_mapping(dev->emac_irq);
2919 err_free:
2920        free_netdev(ndev);
2921 err_gone:
2922        /* if we were on the bootlist, remove us as we won't show up and
2923         * wake up all waiters to notify them in case they were waiting
2924         * on us
2925         */
2926        if (blist) {
2927                *blist = NULL;
2928                wake_up_all(&emac_probe_wait);
2929        }
2930        return err;
2931}
2932
/*
 * Undo emac_probe() for one EMAC instance.  The order mirrors probe in
 * reverse: hide the instance (drvdata), unregister the netdev so no new
 * traffic can start, flush the reset worker, then detach bridges,
 * unregister from MAL, drop dependencies and release mappings/IRQs.
 */
static int __devexit emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	/* Make the instance invisible to lookups first */
	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	/* emac_reset_work may still be queued; wait it out before
	 * tearing down the structures it touches */
	cancel_work_sync(&dev->reset_work);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	/* WOL irq is optional, EMAC irq is mandatory but guard both */
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
2967
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: all three EMAC generations (classic, EMAC4,
 * EMAC4SYNC) are served by this one driver; the generation is decoded
 * later from the compatible string in emac_init_config(). */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
2986
/* Platform driver glue; registered from emac_init(). */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.owner = THIS_MODULE,
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
2996
2997static void __init emac_make_bootlist(void)
2998{
2999        struct device_node *np = NULL;
3000        int j, max, i = 0, k;
3001        int cell_indices[EMAC_BOOT_LIST_SIZE];
3002
3003        /* Collect EMACs */
3004        while((np = of_find_all_nodes(np)) != NULL) {
3005                const u32 *idx;
3006
3007                if (of_match_node(emac_match, np) == NULL)
3008                        continue;
3009                if (of_get_property(np, "unused", NULL))
3010                        continue;
3011                idx = of_get_property(np, "cell-index", NULL);
3012                if (idx == NULL)
3013                        continue;
3014                cell_indices[i] = *idx;
3015                emac_boot_list[i++] = of_node_get(np);
3016                if (i >= EMAC_BOOT_LIST_SIZE) {
3017                        of_node_put(np);
3018                        break;
3019                }
3020        }
3021        max = i;
3022
3023        /* Bubble sort them (doh, what a creative algorithm :-) */
3024        for (i = 0; max > 1 && (i < (max - 1)); i++)
3025                for (j = i; j < max; j++) {
3026                        if (cell_indices[i] > cell_indices[j]) {
3027                                np = emac_boot_list[i];
3028                                emac_boot_list[i] = emac_boot_list[j];
3029                                emac_boot_list[j] = np;
3030                                k = cell_indices[i];
3031                                cell_indices[i] = cell_indices[j];
3032                                cell_indices[j] = k;
3033                        }
3034                }
3035}
3036
3037static int __init emac_init(void)
3038{
3039        int rc;
3040
3041        printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3042
3043        /* Init debug stuff */
3044        emac_init_debug();
3045
3046        /* Build EMAC boot list */
3047        emac_make_bootlist();
3048
3049        /* Init submodules */
3050        rc = mal_init();
3051        if (rc)
3052                goto err;
3053        rc = zmii_init();
3054        if (rc)
3055                goto err_mal;
3056        rc = rgmii_init();
3057        if (rc)
3058                goto err_zmii;
3059        rc = tah_init();
3060        if (rc)
3061                goto err_rgmii;
3062        rc = platform_driver_register(&emac_driver);
3063        if (rc)
3064                goto err_tah;
3065
3066        return 0;
3067
3068 err_tah:
3069        tah_exit();
3070 err_rgmii:
3071        rgmii_exit();
3072 err_zmii:
3073        zmii_exit();
3074 err_mal:
3075        mal_exit();
3076 err:
3077        return rc;
3078}
3079
3080static void __exit emac_exit(void)
3081{
3082        int i;
3083
3084        platform_driver_unregister(&emac_driver);
3085
3086        tah_exit();
3087        rgmii_exit();
3088        zmii_exit();
3089        mal_exit();
3090        emac_fini_debug();
3091
3092        /* Destroy EMAC boot list */
3093        for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3094                if (emac_boot_list[i])
3095                        of_node_put(emac_boot_list[i]);
3096}
3097
/* Module entry/exit hooks */
module_init(emac_init);
module_exit(emac_exit);
3100