linux/drivers/net/ethernet/marvell/skge.c
/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/prefetch.h>
#include <asm/irq.h>

#include "skge.h"

#define DRV_NAME                "skge"
#define DRV_VERSION             "1.14"

#define DEFAULT_TX_RING_SIZE    128
#define DEFAULT_RX_RING_SIZE    512
#define MAX_TX_RING_SIZE        1024
#define TX_LOW_WATER            (MAX_SKB_FRAGS + 1)
#define MAX_RX_RING_SIZE        4096
#define RX_COPY_THRESHOLD       128
#define RX_BUF_SIZE             1536
#define PHY_RETRIES             1000
#define ETH_JUMBO_MTU           9000
#define TX_WATCHDOG             (5 * HZ)
#define NAPI_WEIGHT             64
#define BLINK_MS                250
#define LINK_HZ                 HZ

#define SKGE_EEPROM_MAGIC       0x9933aabb


MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN);

static int debug = -1;  /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct pci_device_id skge_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) },       /* 3Com 3C940 */
        { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) },       /* 3Com 3C940B */
#ifdef CONFIG_SKGE_GENESIS
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
#endif
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },      /* D-Link DGE-530T (rev.B) */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) },      /* D-Link DGE-530T */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) },      /* D-Link DGE-530T Rev C1 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },    /* Marvell Yukon 88E8001/8003/8010 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) },    /* Belkin */
        { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) },       /* CNet PowerG-2000 */
        { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) },    /* Linksys EG1064 v2 */
        { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);

static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
static void skge_set_multicast(struct net_device *dev);
static irqreturn_t skge_intr(int irq, void *dev_id);

/* Avoid conditionals by using arrays */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };

static inline bool is_genesis(const struct skge_hw *hw)
{
#ifdef CONFIG_SKGE_GENESIS
        return hw->chip_id == CHIP_ID_GENESIS;
#else
        return false;
#endif
}

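/* Length of the memory-mapped control register region reported to ethtool */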
static int skge_get_regs_len(struct net_device *dev)
{
        return 0x4000;
}

/*
 * Returns copy of whole control register region
 * Note: skip RAM address register because accessing it will
 *       cause bus hangs!
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                          void *p)
{
        const struct skge_port *skge = netdev_priv(dev);
        const void __iomem *io = skge->hw->regs;

        regs->version = 1;
        memset(p, 0, regs->len);
        memcpy_fromio(p, io, B3_RAM_ADDR);

        memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
                      regs->len - B3_RI_WTO_R1);
}

/* Wake on LAN is only supported on Yukon chips with rev 1 or above */
static u32 wol_supported(const struct skge_hw *hw)
{
        if (is_genesis(hw))
                return 0;

        if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
                return 0;

        return WAKE_MAGIC | WAKE_PHY;
}

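/* Put the PHY and MAC into a state where they can still raise a
 * PME (wake) event on link change or magic packet while the rest
 * of the chip is powered down.
 */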
static void skge_wol_init(struct skge_port *skge)
{
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
        u16 ctrl;

        skge_write16(hw, B0_CTST, CS_RST_CLR);
        skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

        /* Turn on Vaux */
        skge_write8(hw, B0_POWER_CTRL,
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);

        /* WA code for COMA mode -- clear PHY reset */
        if (hw->chip_id == CHIP_ID_YUKON_LITE &&
            hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
                u32 reg = skge_read32(hw, B2_GP_IO);
                reg |= GP_DIR_9;
                reg &= ~GP_IO_9;
                skge_write32(hw, B2_GP_IO, reg);
        }

        skge_write32(hw, SK_REG(port, GPHY_CTRL),
                     GPC_DIS_SLEEP |
                     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
                     GPC_ANEG_1 | GPC_RST_SET);

        skge_write32(hw, SK_REG(port, GPHY_CTRL),
                     GPC_DIS_SLEEP |
                     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
                     GPC_ANEG_1 | GPC_RST_CLR);

        skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

        /* Force to 10/100; skge_reset will re-enable full speed on resume */
        gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
                     (PHY_AN_100FULL | PHY_AN_100HALF |
                      PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
        /* no 1000 HD/FD */
        gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
        gm_phy_write(hw, port, PHY_MARV_CTRL,
                     PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
                     PHY_CT_RE_CFG | PHY_CT_DUP_MD);


        /* Set GMAC to no flow control and auto update for speed/duplex */
        gma_write16(hw, port, GM_GP_CTRL,
                    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
                    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

        /* Set WOL address */
        memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
                    skge->netdev->dev_addr, ETH_ALEN);

        /* Turn on appropriate WOL control bits */
        skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
        ctrl = 0;
        if (skge->wol & WAKE_PHY)
                ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
        else
                ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

        if (skge->wol & WAKE_MAGIC)
                ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
        else
                ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

        ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
        skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

        /* block receiver */
        skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
}

static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct skge_port *skge = netdev_priv(dev);

        wol->supported = wol_supported(skge->hw);
        wol->wolopts = skge->wol;
}

static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;

        if ((wol->wolopts & ~wol_supported(hw)) ||
            !device_can_wakeup(&hw->pdev->dev))
                return -EOPNOTSUPP;

        skge->wol = wol->wolopts;

        device_set_wakeup_enable(&hw->pdev->dev, skge->wol);

        return 0;
}

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
        u32 supported;

        if (hw->copper) {
                supported = (SUPPORTED_10baseT_Half |
                             SUPPORTED_10baseT_Full |
                             SUPPORTED_100baseT_Half |
                             SUPPORTED_100baseT_Full |
                             SUPPORTED_1000baseT_Half |
                             SUPPORTED_1000baseT_Full |
                             SUPPORTED_Autoneg |
                             SUPPORTED_TP);

                if (is_genesis(hw))
                        supported &= ~(SUPPORTED_10baseT_Half |
                                       SUPPORTED_10baseT_Full |
                                       SUPPORTED_100baseT_Half |
                                       SUPPORTED_100baseT_Full);

                else if (hw->chip_id == CHIP_ID_YUKON)
                        supported &= ~SUPPORTED_1000baseT_Half;
        } else
                supported = (SUPPORTED_1000baseT_Full |
                             SUPPORTED_1000baseT_Half |
                             SUPPORTED_FIBRE |
                             SUPPORTED_Autoneg);

        return supported;
}

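/* Report link settings from the software copy kept in skge_port */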
static int skge_get_settings(struct net_device *dev,
                             struct ethtool_cmd *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;

        ecmd->transceiver = XCVR_INTERNAL;
        ecmd->supported = skge_supported_modes(hw);

        if (hw->copper) {
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy_addr;
        } else
                ecmd->port = PORT_FIBRE;

        ecmd->advertising = skge->advertising;
        ecmd->autoneg = skge->autoneg;
        ethtool_cmd_speed_set(ecmd, skge->speed);
        ecmd->duplex = skge->duplex;
        return 0;
}

static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);
        const struct skge_hw *hw = skge->hw;
        u32 supported = skge_supported_modes(hw);
        int err = 0;

        if (ecmd->autoneg == AUTONEG_ENABLE) {
                ecmd->advertising = supported;
                skge->duplex = -1;
                skge->speed = -1;
        } else {
                u32 setting;
                u32 speed = ethtool_cmd_speed(ecmd);

                switch (speed) {
                case SPEED_1000:
                        if (ecmd->duplex == DUPLEX_FULL)
                                setting = SUPPORTED_1000baseT_Full;
                        else if (ecmd->duplex == DUPLEX_HALF)
                                setting = SUPPORTED_1000baseT_Half;
                        else
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (ecmd->duplex == DUPLEX_FULL)
                                setting = SUPPORTED_100baseT_Full;
                        else if (ecmd->duplex == DUPLEX_HALF)
                                setting = SUPPORTED_100baseT_Half;
                        else
                                return -EINVAL;
                        break;

                case SPEED_10:
                        if (ecmd->duplex == DUPLEX_FULL)
                                setting = SUPPORTED_10baseT_Full;
                        else if (ecmd->duplex == DUPLEX_HALF)
                                setting = SUPPORTED_10baseT_Half;
                        else
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                if ((setting & supported) == 0)
                        return -EINVAL;

                skge->speed = speed;
                skge->duplex = ecmd->duplex;
        }

        skge->autoneg = ecmd->autoneg;
        skge->advertising = ecmd->advertising;

        if (netif_running(dev)) {
                skge_down(dev);
                err = skge_up(dev);
                if (err) {
                        dev_close(dev);
                        return err;
                }
        }

        return 0;
}

static void skge_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
{
        struct skge_port *skge = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(skge->hw->pdev),
                sizeof(info->bus_info));
}

static const struct skge_stat {
        char       name[ETH_GSTRING_LEN];
        u16        xmac_offset;
        u16        gma_offset;
} skge_stats[] = {
        { "tx_bytes",           XM_TXO_OK_HI,  GM_TXO_OK_HI },
        { "rx_bytes",           XM_RXO_OK_HI,  GM_RXO_OK_HI },

        { "tx_broadcast",       XM_TXF_BC_OK,  GM_TXF_BC_OK },
        { "rx_broadcast",       XM_RXF_BC_OK,  GM_RXF_BC_OK },
        { "tx_multicast",       XM_TXF_MC_OK,  GM_TXF_MC_OK },
        { "rx_multicast",       XM_RXF_MC_OK,  GM_RXF_MC_OK },
        { "tx_unicast",         XM_TXF_UC_OK,  GM_TXF_UC_OK },
        { "rx_unicast",         XM_RXF_UC_OK,  GM_RXF_UC_OK },
        { "tx_mac_pause",       XM_TXF_MPAUSE, GM_TXF_MPAUSE },
        { "rx_mac_pause",       XM_RXF_MPAUSE, GM_RXF_MPAUSE },

        { "collisions",         XM_TXF_SNG_COL, GM_TXF_SNG_COL },
        { "multi_collisions",   XM_TXF_MUL_COL, GM_TXF_MUL_COL },
        { "aborted",            XM_TXF_ABO_COL, GM_TXF_ABO_COL },
        { "late_collision",     XM_TXF_LAT_COL, GM_TXF_LAT_COL },
        { "fifo_underrun",      XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
        { "fifo_overflow",      XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },

        { "rx_toolong",         XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
        { "rx_jabber",          XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
        { "rx_runt",            XM_RXE_RUNT,    GM_RXE_FRAG },
        { "rx_too_long",        XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
        { "rx_fcs_error",       XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
};

static int skge_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(skge_stats);
        default:
                return -EOPNOTSUPP;
        }
}

static void skge_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct skge_port *skge = netdev_priv(dev);

        if (is_genesis(skge->hw))
                genesis_get_stats(skge, data);
        else
                yukon_get_stats(skge, data);
}

/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
        struct skge_port *skge = netdev_priv(dev);
        u64 data[ARRAY_SIZE(skge_stats)];

        if (is_genesis(skge->hw))
                genesis_get_stats(skge, data);
        else
                yukon_get_stats(skge, data);

        dev->stats.tx_bytes = data[0];
        dev->stats.rx_bytes = data[1];
        dev->stats.tx_packets = data[2] + data[4] + data[6];
        dev->stats.rx_packets = data[3] + data[5] + data[7];
        dev->stats.multicast = data[3] + data[5];
        dev->stats.collisions = data[10];
        dev->stats.tx_aborted_errors = data[12];

        return &dev->stats;
}

static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               skge_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static void skge_get_ring_param(struct net_device *dev,
                                struct ethtool_ringparam *p)
{
        struct skge_port *skge = netdev_priv(dev);

        p->rx_max_pending = MAX_RX_RING_SIZE;
        p->tx_max_pending = MAX_TX_RING_SIZE;

        p->rx_pending = skge->rx_ring.count;
        p->tx_pending = skge->tx_ring.count;
}

static int skge_set_ring_param(struct net_device *dev,
                               struct ethtool_ringparam *p)
{
        struct skge_port *skge = netdev_priv(dev);
        int err = 0;

        if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
            p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
                return -EINVAL;

        skge->rx_ring.count = p->rx_pending;
        skge->tx_ring.count = p->tx_pending;

        if (netif_running(dev)) {
                skge_down(dev);
                err = skge_up(dev);
                if (err)
                        dev_close(dev);
        }

        return err;
}

static u32 skge_get_msglevel(struct net_device *netdev)
{
        struct skge_port *skge = netdev_priv(netdev);
        return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
        struct skge_port *skge = netdev_priv(netdev);
        skge->msg_enable = value;
}

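/* Restart autonegotiation (ethtool -r) by resetting the PHY */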
static int skge_nway_reset(struct net_device *dev)
{
        struct skge_port *skge = netdev_priv(dev);

        if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
                return -EINVAL;

        skge_phy_reset(skge);
        return 0;
}

static void skge_get_pauseparam(struct net_device *dev,
                                struct ethtool_pauseparam *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);

        ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
                          (skge->flow_control == FLOW_MODE_SYM_OR_REM));
        ecmd->tx_pause = (ecmd->rx_pause ||
                          (skge->flow_control == FLOW_MODE_LOC_SEND));

        ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
}

static int skge_set_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);
        struct ethtool_pauseparam old;
        int err = 0;

        skge_get_pauseparam(dev, &old);

        if (ecmd->autoneg != old.autoneg)
                skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
        else {
                if (ecmd->rx_pause && ecmd->tx_pause)
                        skge->flow_control = FLOW_MODE_SYMMETRIC;
                else if (ecmd->rx_pause && !ecmd->tx_pause)
                        skge->flow_control = FLOW_MODE_SYM_OR_REM;
                else if (!ecmd->rx_pause && ecmd->tx_pause)
                        skge->flow_control = FLOW_MODE_LOC_SEND;
                else
                        skge->flow_control = FLOW_MODE_NONE;
        }

        if (netif_running(dev)) {
                skge_down(dev);
                err = skge_up(dev);
                if (err) {
                        dev_close(dev);
                        return err;
                }
        }

        return 0;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
        return is_genesis(hw) ? 53125 : 78125;
}

/* Chip HZ to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
        return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
        return hwkhz(hw) * usec / 1000;
}

static int skge_get_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;
        int port = skge->port;

        ecmd->rx_coalesce_usecs = 0;
        ecmd->tx_coalesce_usecs = 0;

        if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
                u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
                u32 msk = skge_read32(hw, B2_IRQM_MSK);

                if (msk & rxirqmask[port])
                        ecmd->rx_coalesce_usecs = delay;
                if (msk & txirqmask[port])
                        ecmd->tx_coalesce_usecs = delay;
        }

        return 0;
}

/* Note: interrupt timer is per board, but can turn on/off per port */
static int skge_set_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *ecmd)
{
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
        u32 msk = skge_read32(hw, B2_IRQM_MSK);
        u32 delay = 25;

        if (ecmd->rx_coalesce_usecs == 0)
                msk &= ~rxirqmask[port];
        else if (ecmd->rx_coalesce_usecs < 25 ||
                 ecmd->rx_coalesce_usecs > 33333)
                return -EINVAL;
        else {
                msk |= rxirqmask[port];
                delay = ecmd->rx_coalesce_usecs;
        }

        if (ecmd->tx_coalesce_usecs == 0)
                msk &= ~txirqmask[port];
        else if (ecmd->tx_coalesce_usecs < 25 ||
                 ecmd->tx_coalesce_usecs > 33333)
                return -EINVAL;
        else {
                msk |= txirqmask[port];
                /* the timer is shared; use the smaller of the rx/tx settings */
                delay = min(delay, ecmd->tx_coalesce_usecs);
        }

        skge_write32(hw, B2_IRQM_MSK, msk);
        if (msk == 0)
                skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
        else {
                skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
                skge_write32(hw, B2_IRQM_CTRL, TIM_START);
        }
        return 0;
}

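/* Manual LED control, used by 'ethtool -p' to make a board locatable */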
enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
        struct skge_hw *hw = skge->hw;
        int port = skge->port;

        spin_lock_bh(&hw->phy_lock);
        if (is_genesis(hw)) {
                switch (mode) {
                case LED_MODE_OFF:
                        if (hw->phy_type == SK_PHY_BCOM)
                                xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
                        else {
                                skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
                                skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
                        }
                        skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
                        skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
                        skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
                        break;

                case LED_MODE_ON:
                        skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
                        skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

                        skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
                        skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

                        break;

                case LED_MODE_TST:
                        skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
                        skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
                        skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

                        if (hw->phy_type == SK_PHY_BCOM)
                                xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
                        else {
                                skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
                                skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
                                skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
                        }

                }
        } else {
                switch (mode) {
                case LED_MODE_OFF:
                        gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
                        gm_phy_write(hw, port, PHY_MARV_LED_OVER,
                                     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
                                     PHY_M_LED_MO_10(MO_LED_OFF)   |
                                     PHY_M_LED_MO_100(MO_LED_OFF)  |
                                     PHY_M_LED_MO_1000(MO_LED_OFF) |
                                     PHY_M_LED_MO_RX(MO_LED_OFF));
                        break;
                case LED_MODE_ON:
                        gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
                                     PHY_M_LED_PULS_DUR(PULS_170MS) |
                                     PHY_M_LED_BLINK_RT(BLINK_84MS) |
                                     PHY_M_LEDC_TX_CTRL |
                                     PHY_M_LEDC_DP_CTRL);

                        gm_phy_write(hw, port, PHY_MARV_LED_OVER,
                                     PHY_M_LED_MO_RX(MO_LED_OFF) |
                                     (skge->speed == SPEED_100 ?
                                      PHY_M_LED_MO_100(MO_LED_ON) : 0));
                        break;
                case LED_MODE_TST:
                        gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
                        gm_phy_write(hw, port, PHY_MARV_LED_OVER,
                                     PHY_M_LED_MO_DUP(MO_LED_ON)  |
                                     PHY_M_LED_MO_10(MO_LED_ON)   |
                                     PHY_M_LED_MO_100(MO_LED_ON)  |
                                     PHY_M_LED_MO_1000(MO_LED_ON) |
                                     PHY_M_LED_MO_RX(MO_LED_ON));
                }
        }
        spin_unlock_bh(&hw->phy_lock);
}

/* Blink LEDs for locating a board */
static int skge_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
{
        struct skge_port *skge = netdev_priv(dev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 2;       /* cycle on/off twice per second */

        case ETHTOOL_ID_ON:
                skge_led(skge, LED_MODE_TST);
                break;

        case ETHTOOL_ID_OFF:
                skge_led(skge, LED_MODE_OFF);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* back to regular LED state */
                skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
        }

        return 0;
}

static int skge_get_eeprom_len(struct net_device *dev)
{
        struct skge_port *skge = netdev_priv(dev);
        u32 reg2;

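        /* VPD EEPROM size is encoded as a power of two (2^(n + 8) bytes) */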
        pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
        return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}

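/* Read one 32-bit word of VPD through the PCI 2.2 VPD capability,
 * polling until the hardware sets the completion flag.
 */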
static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
{
        u32 val;

        pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);

        do {
                pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
        } while (!(offset & PCI_VPD_ADDR_F));

        pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
        return val;
}

static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
{
        pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
        pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
                              offset | PCI_VPD_ADDR_F);

        do {
                pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
        } while (offset & PCI_VPD_ADDR_F);
}

static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                           u8 *data)
{
        struct skge_port *skge = netdev_priv(dev);
        struct pci_dev *pdev = skge->hw->pdev;
        int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
        int length = eeprom->len;
        u16 offset = eeprom->offset;

        if (!cap)
                return -EINVAL;

        eeprom->magic = SKGE_EEPROM_MAGIC;

        while (length > 0) {
                u32 val = skge_vpd_read(pdev, cap, offset);
                int n = min_t(int, length, sizeof(val));

                memcpy(data, &val, n);
                length -= n;
                data += n;
                offset += n;
        }
        return 0;
}

static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                           u8 *data)
{
        struct skge_port *skge = netdev_priv(dev);
        struct pci_dev *pdev = skge->hw->pdev;
        int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
        int length = eeprom->len;
        u16 offset = eeprom->offset;

        if (!cap)
                return -EINVAL;

        if (eeprom->magic != SKGE_EEPROM_MAGIC)
                return -EINVAL;

        while (length > 0) {
                u32 val;
                int n = min_t(int, length, sizeof(val));

                if (n < sizeof(val))
                        val = skge_vpd_read(pdev, cap, offset);
                memcpy(&val, data, n);

                skge_vpd_write(pdev, cap, offset, val);

                length -= n;
                data += n;
                offset += n;
        }
        return 0;
}

static const struct ethtool_ops skge_ethtool_ops = {
        .get_settings   = skge_get_settings,
        .set_settings   = skge_set_settings,
        .get_drvinfo    = skge_get_drvinfo,
        .get_regs_len   = skge_get_regs_len,
        .get_regs       = skge_get_regs,
        .get_wol        = skge_get_wol,
        .set_wol        = skge_set_wol,
        .get_msglevel   = skge_get_msglevel,
        .set_msglevel   = skge_set_msglevel,
        .nway_reset     = skge_nway_reset,
        .get_link       = ethtool_op_get_link,
        .get_eeprom_len = skge_get_eeprom_len,
        .get_eeprom     = skge_get_eeprom,
        .set_eeprom     = skge_set_eeprom,
        .get_ringparam  = skge_get_ring_param,
        .set_ringparam  = skge_set_ring_param,
        .get_pauseparam = skge_get_pauseparam,
        .set_pauseparam = skge_set_pauseparam,
        .get_coalesce   = skge_get_coalesce,
        .set_coalesce   = skge_set_coalesce,
        .get_strings    = skge_get_strings,
        .set_phys_id    = skge_set_phys_id,
        .get_sset_count = skge_get_sset_count,
        .get_ethtool_stats = skge_get_ethtool_stats,
};

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
        struct skge_tx_desc *d;
        struct skge_element *e;
        int i;

        ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
        if (!ring->start)
                return -ENOMEM;

        for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
                e->desc = d;
                if (i == ring->count - 1) {
                        e->next = ring->start;
                        d->next_offset = base;
                } else {
                        e->next = e + 1;
                        d->next_offset = base + (i+1) * sizeof(*d);
                }
        }
        ring->to_use = ring->to_clean = ring->start;

        return 0;
}

/* Allocate and setup a new buffer for receiving */
static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
                         struct sk_buff *skb, unsigned int bufsize)
{
        struct skge_rx_desc *rd = e->desc;
        dma_addr_t map;

        map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
                             PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(skge->hw->pdev, map))
                return -1;

        rd->dma_lo = lower_32_bits(map);
        rd->dma_hi = upper_32_bits(map);
        e->skb = skb;
        rd->csum1_start = ETH_HLEN;
        rd->csum2_start = ETH_HLEN;
        rd->csum1 = 0;
        rd->csum2 = 0;

        wmb();

        rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, bufsize);
        return 0;
}

/* Resume receiving using existing skb,
 * Note: DMA address is not changed by chip.
 *       MTU not changed while receiver active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
        struct skge_rx_desc *rd = e->desc;

        rd->csum2 = 0;
        rd->csum2_start = ETH_HLEN;

        wmb();

        rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}


/* Free all buffers in receive ring; assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
        struct skge_hw *hw = skge->hw;
        struct skge_ring *ring = &skge->rx_ring;
        struct skge_element *e;

        e = ring->start;
        do {
                struct skge_rx_desc *rd = e->desc;
                rd->control = 0;
                if (e->skb) {
                        pci_unmap_single(hw->pdev,
                                         dma_unmap_addr(e, mapaddr),
                                         dma_unmap_len(e, maplen),
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(e->skb);
                        e->skb = NULL;
                }
        } while ((e = e->next) != ring->start);
}


/* Allocate buffers for receive ring
 * For receive:  to_clean is next received frame.
 */
static int skge_rx_fill(struct net_device *dev)
{
        struct skge_port *skge = netdev_priv(dev);
        struct skge_ring *ring = &skge->rx_ring;
        struct skge_element *e;

        e = ring->start;
        do {
                struct sk_buff *skb;

                skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
                                         GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;

                skb_reserve(skb, NET_IP_ALIGN);
                if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
                        dev_kfree_skb(skb);
                        return -EIO;
                }
        } while ((e = e->next) != ring->start);

        ring->to_clean = ring->start;
        return 0;
}

static const char *skge_pause(enum pause_status status)
{
        switch (status) {
        case FLOW_STAT_NONE:
                return "none";
        case FLOW_STAT_REM_SEND:
                return "rx only";
        case FLOW_STAT_LOC_SEND:
                return "tx only";
        case FLOW_STAT_SYMMETRIC:               /* Both stations may send PAUSE */
                return "both";
        default:
                return "indeterminate";
        }
}


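/* Link came up: light the link LED and restart the transmit queue */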
static void skge_link_up(struct skge_port *skge)
{
        skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
                    LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON);

        netif_carrier_on(skge->netdev);
        netif_wake_queue(skge->netdev);

        netif_info(skge, link, skge->netdev,
                   "Link is up at %d Mbps, %s duplex, flow control %s\n",
                   skge->speed,
                   skge->duplex == DUPLEX_FULL ? "full" : "half",
                   skge_pause(skge->flow_status));
}

static void skge_link_down(struct skge_port *skge)
{
        skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
        netif_carrier_off(skge->netdev);
        netif_stop_queue(skge->netdev);

        netif_info(skge, link, skge->netdev, "Link is down\n");
}

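/* Link went down on a Genesis port: mask XMAC interrupts first */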
static void xm_link_down(struct skge_hw *hw, int port)
{
        struct net_device *dev = hw->dev[port];
        struct skge_port *skge = netdev_priv(dev);

        xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);

        if (netif_carrier_ok(dev))
                skge_link_down(skge);
}

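/* Read a PHY register through the XMAC MDIO interface.
 * Returns 0 on success or -ETIMEDOUT if the PHY never becomes ready.
 */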
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
        int i;

        xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
        *val = xm_read16(hw, port, XM_PHY_DATA);

        if (hw->phy_type == SK_PHY_XMAC)
                goto ready;

        for (i = 0; i < PHY_RETRIES; i++) {
                if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
                        goto ready;
                udelay(1);
        }

        return -ETIMEDOUT;
 ready:
        *val = xm_read16(hw, port, XM_PHY_DATA);

        return 0;
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
        u16 v = 0;
        if (__xm_phy_read(hw, port, reg, &v))
                pr_warn("%s: phy read timed out\n", hw->dev[port]->name);
        return v;
}

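/* Write a PHY register through the XMAC MDIO interface, waiting
 * for the PHY to go non-busy both before and after the write.
 */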
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
        int i;

        xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
        for (i = 0; i < PHY_RETRIES; i++) {
                if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
                        goto ready;
                udelay(1);
        }
        return -EIO;

 ready:
        xm_write16(hw, port, XM_PHY_DATA, val);
        for (i = 0; i < PHY_RETRIES; i++) {
                if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

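/* One-time initialization of the blink source and MAC/packet arbiters */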
static void genesis_init(struct skge_hw *hw)
{
        /* set blink source counter */
        skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
        skge_write8(hw, B2_BSC_CTRL, BSC_START);

        /* configure mac arbiter */
        skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

        /* configure mac arbiter timeout values */
        skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
        skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
        skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
        skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

        skge_write8(hw, B3_MA_RCINI_RX1, 0);
        skge_write8(hw, B3_MA_RCINI_RX2, 0);
        skge_write8(hw, B3_MA_RCINI_TX1, 0);
        skge_write8(hw, B3_MA_RCINI_TX2, 0);

        /* configure packet arbiter timeout */
        skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
        skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
        skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
        skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
        skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

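/* Quiesce one Genesis port: mask interrupts, clear statistics, flush FIFOs */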
static void genesis_reset(struct skge_hw *hw, int port)
{
        static const u8 zero[8]  = { 0 };
        u32 reg;

        skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

        /* reset the statistics module */
        xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
        xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
        xm_write32(hw, port, XM_MODE, 0);               /* clear Mode Reg */
        xm_write16(hw, port, XM_TX_CMD, 0);     /* reset TX CMD Reg */
        xm_write16(hw, port, XM_RX_CMD, 0);     /* reset RX CMD Reg */

        /* disable Broadcom PHY IRQ */
        if (hw->phy_type == SK_PHY_BCOM)
                xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

        xm_outhash(hw, port, XM_HSM, zero);

        /* Flush TX and RX fifo */
        reg = xm_read32(hw, port, XM_MODE);
        xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
        xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}

/* Convert mode to MII values  */
static const u16 phy_pause_map[] = {
        [FLOW_MODE_NONE] =      0,
        [FLOW_MODE_LOC_SEND] =  PHY_AN_PAUSE_ASYM,
        [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
        [FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
        [FLOW_MODE_NONE]        = PHY_X_P_NO_PAUSE,
        [FLOW_MODE_LOC_SEND]    = PHY_X_P_ASYM_MD,
        [FLOW_MODE_SYMMETRIC]   = PHY_X_P_SYM_MD,
        [FLOW_MODE_SYM_OR_REM]  = PHY_X_P_BOTH_MD,
};


/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
        struct net_device *dev = hw->dev[port];
        struct skge_port *skge = netdev_priv(dev);
        u16 status;

        /* read twice because of latch */
        xm_phy_read(hw, port, PHY_BCOM_STAT);
        status = xm_phy_read(hw, port, PHY_BCOM_STAT);

        if ((status & PHY_ST_LSYNC) == 0) {
                xm_link_down(hw, port);
                return;
        }

        if (skge->autoneg == AUTONEG_ENABLE) {
                u16 lpa, aux;

                if (!(status & PHY_ST_AN_OVER))
                        return;

                lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
                if (lpa & PHY_B_AN_RF) {
                        netdev_notice(dev, "remote fault\n");
                        return;
                }

                aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

                /* Check Duplex mismatch */
                switch (aux & PHY_B_AS_AN_RES_MSK) {
                case PHY_B_RES_1000FD:
                        skge->duplex = DUPLEX_FULL;
                        break;
                case PHY_B_RES_1000HD:
                        skge->duplex = DUPLEX_HALF;
                        break;
                default:
                        netdev_notice(dev, "duplex mismatch\n");
                        return;
                }

                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
                switch (aux & PHY_B_AS_PAUSE_MSK) {
                case PHY_B_AS_PAUSE_MSK:
                        skge->flow_status = FLOW_STAT_SYMMETRIC;
                        break;
                case PHY_B_AS_PRR:
                        skge->flow_status = FLOW_STAT_REM_SEND;
                        break;
                case PHY_B_AS_PRT:
                        skge->flow_status = FLOW_STAT_LOC_SEND;
                        break;
                default:
                        skge->flow_status = FLOW_STAT_NONE;
                }
                skge->speed = SPEED_1000;
        }

        if (!netif_carrier_ok(dev))
                genesis_link_up(skge);
}

1271/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
1272 * Phy on for 100 or 10Mbit operation
1273 */
static void bcom_phy_init(struct skge_port *skge)
{
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
        int i;
        u16 id1, r, ext, ctl;

        /* magic workaround patterns for Broadcom */
        static const struct {
                u16 reg;
                u16 val;
        } A1hack[] = {
                { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
                { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
                { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
                { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
        }, C0hack[] = {
                { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
                { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
        };

        /* read Id from external PHY (all have the same address) */
        id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

        /* Optimize MDIO transfer by suppressing preamble. */
        r = xm_read16(hw, port, XM_MMU_CMD);
        r |=  XM_MMU_NO_PRE;
        xm_write16(hw, port, XM_MMU_CMD, r);

        switch (id1) {
        case PHY_BCOM_ID1_C0:
                /*
                 * Workaround BCOM Errata for the C0 type.
                 * Write magic patterns to reserved registers.
                 */
                for (i = 0; i < ARRAY_SIZE(C0hack); i++)
                        xm_phy_write(hw, port,
                                     C0hack[i].reg, C0hack[i].val);

                break;
        case PHY_BCOM_ID1_A1:
                /*
                 * Workaround BCOM Errata for the A1 type.
                 * Write magic patterns to reserved registers.
                 */
                for (i = 0; i < ARRAY_SIZE(A1hack); i++)
                        xm_phy_write(hw, port,
                                     A1hack[i].reg, A1hack[i].val);
                break;
        }

        /*
         * Workaround BCOM Errata (#10523) for all BCom PHYs.
         * Disable Power Management after reset.
         */
        r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
        r |= PHY_B_AC_DIS_PM;
        xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

        /* Dummy read */
        xm_read16(hw, port, XM_ISRC);

        ext = PHY_B_PEC_EN_LTR; /* enable tx led */
        ctl = PHY_CT_SP1000;    /* always 1000mbit */

        if (skge->autoneg == AUTONEG_ENABLE) {
                /*
                 * Workaround BCOM Errata #1 for the C5 type.
                 * 1000Base-T Link Acquisition Failure in Slave Mode
                 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
                 */
                u16 adv = PHY_B_1000C_RD;
                if (skge->advertising & ADVERTISED_1000baseT_Half)
                        adv |= PHY_B_1000C_AHD;
                if (skge->advertising & ADVERTISED_1000baseT_Full)
                        adv |= PHY_B_1000C_AFD;
                xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

                ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
        } else {
                if (skge->duplex == DUPLEX_FULL)
                        ctl |= PHY_CT_DUP_MD;
                /* Force to slave */
                xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
        }

        /* Set autonegotiation pause parameters */
        xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
                     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

        /* Handle Jumbo frames */
        if (hw->dev[port]->mtu > ETH_DATA_LEN) {
                xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
                             PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

                ext |= PHY_B_PEC_HIGH_LA;

        }

        xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
        xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

        /* Use link status change interrupt */
        xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}

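/* Set up the XMAC internal (fiber) PHY; link state is then polled by timer */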
static void xm_phy_init(struct skge_port *skge)
{
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
        u16 ctrl = 0;

        if (skge->autoneg == AUTONEG_ENABLE) {
                if (skge->advertising & ADVERTISED_1000baseT_Half)
                        ctrl |= PHY_X_AN_HD;
                if (skge->advertising & ADVERTISED_1000baseT_Full)
                        ctrl |= PHY_X_AN_FD;

                ctrl |= fiber_pause_map[skge->flow_control];

                xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

                /* Restart Auto-negotiation */
                ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
        } else {
                /* Set DuplexMode in Config register */
                if (skge->duplex == DUPLEX_FULL)
                        ctrl |= PHY_CT_DUP_MD;
                /*
                 * Do NOT enable Auto-negotiation here. This would hold
                 * the link down because no IDLEs are transmitted
                 */
        }

        xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

        /* Poll PHY for status changes */
        mod_timer(&skge->link_timer, jiffies + LINK_HZ);
}

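/* Check fiber PHY link state; returns non-zero if the link is up */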
1414static int xm_check_link(struct net_device *dev)
1415{
1416        struct skge_port *skge = netdev_priv(dev);
1417        struct skge_hw *hw = skge->hw;
1418        int port = skge->port;
1419        u16 status;
1420
1421        /* read twice because of latch */
1422        xm_phy_read(hw, port, PHY_XMAC_STAT);
1423        status = xm_phy_read(hw, port, PHY_XMAC_STAT);
1424
1425        if ((status & PHY_ST_LSYNC) == 0) {
1426                xm_link_down(hw, port);
1427                return 0;
1428        }
1429
1430        if (skge->autoneg == AUTONEG_ENABLE) {
1431                u16 lpa, res;
1432
1433                if (!(status & PHY_ST_AN_OVER))
1434                        return 0;
1435
1436                lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1437                if (lpa & PHY_B_AN_RF) {
1438                        netdev_notice(dev, "remote fault\n");
1439                        return 0;
1440                }
1441
1442                res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
1443
1444                /* Check Duplex mismatch */
1445                switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
1446                case PHY_X_RS_FD:
1447                        skge->duplex = DUPLEX_FULL;
1448                        break;
1449                case PHY_X_RS_HD:
1450                        skge->duplex = DUPLEX_HALF;
1451                        break;
1452                default:
1453                        netdev_notice(dev, "duplex mismatch\n");
1454                        return 0;
1455                }
1456
1457                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1458                if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
1459                     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
1460                    (lpa & PHY_X_P_SYM_MD))
1461                        skge->flow_status = FLOW_STAT_SYMMETRIC;
1462                else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
1463                         (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
1464                        /* Enable PAUSE receive, disable PAUSE transmit */
1465                        skge->flow_status  = FLOW_STAT_REM_SEND;
1466                else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
1467                         (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
1468                        /* Disable PAUSE receive, enable PAUSE transmit */
1469                        skge->flow_status = FLOW_STAT_LOC_SEND;
1470                else
1471                        skge->flow_status = FLOW_STAT_NONE;
1472
1473                skge->speed = SPEED_1000;
1474        }
1475
1476        if (!netif_carrier_ok(dev))
1477                genesis_link_up(skge);
1478        return 1;
1479}
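    /* Why xm_check_link() reads PHY_XMAC_STAT twice: the status bits are
     * latched, so the first read returns the stale latched value and
     * only the second read reflects the current link state.  The pause
     * resolution above follows IEEE 802.3z/D5.0 Table 37-4, as the
     * inline comment notes.
     */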
1480
1481/* Poll to check for the link coming up.
1482 *
1483 * Since the internal PHY is wired to a level-triggered pin, we can't
1484 * get an interrupt when carrier is detected, so we have to poll for
1485 * the link coming up.
1486 */
1487static void xm_link_timer(unsigned long arg)
1488{
1489        struct skge_port *skge = (struct skge_port *) arg;
1490        struct net_device *dev = skge->netdev;
1491        struct skge_hw *hw = skge->hw;
1492        int port = skge->port;
1493        int i;
1494        unsigned long flags;
1495
1496        if (!netif_running(dev))
1497                return;
1498
1499        spin_lock_irqsave(&hw->phy_lock, flags);
1500
1501        /*
1502         * Verify the link by checking the GPIO register three times.
1503         * This pin carries the signal from the link_sync pin.
1504         */
1505        for (i = 0; i < 3; i++) {
1506                if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
1507                        goto link_down;
1508        }
1509
1510        /* Re-enable interrupt to detect link down */
1511        if (xm_check_link(dev)) {
1512                u16 msk = xm_read16(hw, port, XM_IMSK);
1513                msk &= ~XM_IS_INP_ASS;
1514                xm_write16(hw, port, XM_IMSK, msk);
1515                xm_read16(hw, port, XM_ISRC);
1516        } else {
1517link_down:
1518                mod_timer(&skge->link_timer,
1519                          round_jiffies(jiffies + LINK_HZ));
1520        }
1521        spin_unlock_irqrestore(&hw->phy_lock, flags);
1522}
1523
1524static void genesis_mac_init(struct skge_hw *hw, int port)
1525{
1526        struct net_device *dev = hw->dev[port];
1527        struct skge_port *skge = netdev_priv(dev);
1528        int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1529        int i;
1530        u32 r;
1531        static const u8 zero[6]  = { 0 };
1532
1533        for (i = 0; i < 10; i++) {
1534                skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
1535                             MFF_SET_MAC_RST);
1536                if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
1537                        goto reset_ok;
1538                udelay(1);
1539        }
1540
1541        netdev_warn(dev, "genesis reset failed\n");
1542
1543 reset_ok:
1544        /* Unreset the XMAC. */
1545        skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1546
1547        /*
1548         * Perform additional initialization for external PHYs,
1549         * namely for the 1000baseTX cards that use the XMAC's
1550         * GMII mode.
1551         */
1552        if (hw->phy_type != SK_PHY_XMAC) {
1553                /* Take external Phy out of reset */
1554                r = skge_read32(hw, B2_GP_IO);
1555                if (port == 0)
1556                        r |= GP_DIR_0|GP_IO_0;
1557                else
1558                        r |= GP_DIR_2|GP_IO_2;
1559
1560                skge_write32(hw, B2_GP_IO, r);
1561
1562                /* Enable GMII interface */
1563                xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1564        }
1565
1566
1567        switch (hw->phy_type) {
1568        case SK_PHY_XMAC:
1569                xm_phy_init(skge);
1570                break;
1571        case SK_PHY_BCOM:
1572                bcom_phy_init(skge);
1573                bcom_check_link(hw, port);
1574        }
1575
1576        /* Set Station Address */
1577        xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1578
1579        /* We don't use match addresses, so clear them all */
1580        for (i = 1; i < 16; i++)
1581                xm_outaddr(hw, port, XM_EXM(i), zero);
1582
1583        /* Clear MIB counters */
1584        xm_write16(hw, port, XM_STAT_CMD,
1585                        XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1586        /* Clear twice according to Errata #3 */
1587        xm_write16(hw, port, XM_STAT_CMD,
1588                        XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1589
1590        /* configure Rx High Water Mark (XM_RX_HI_WM) */
1591        xm_write16(hw, port, XM_RX_HI_WM, 1450);
1592
1593        /* We don't need the FCS appended to the packet. */
1594        r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1595        if (jumbo)
1596                r |= XM_RX_BIG_PK_OK;
1597
1598        if (skge->duplex == DUPLEX_HALF) {
1599                /*
1600                 * In manual half-duplex mode the other side might be in
1601                 * full-duplex mode, so don't flag received frames that
1602                 * lack a carrier extension as errors.
1603                 */
1604                r |= XM_RX_DIS_CEXT;
1605        }
1606        xm_write16(hw, port, XM_RX_CMD, r);
1607
1608        /* We want short frames padded to 60 bytes. */
1609        xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1610
1611        /* Increase threshold for jumbo frames on dual port */
1612        if (hw->ports > 1 && jumbo)
1613                xm_write16(hw, port, XM_TX_THR, 1020);
1614        else
1615                xm_write16(hw, port, XM_TX_THR, 512);
1616
1617        /*
1618         * Enable the reception of all error frames. This is
1619         * a necessary evil due to the design of the XMAC. The
1620         * XMAC's receive FIFO is only 8K in size, however jumbo
1621         * frames can be up to 9000 bytes in length. When bad
1622         * frame filtering is enabled, the XMAC's RX FIFO operates
1623         * in 'store and forward' mode. For this to work, the
1624         * entire frame has to fit into the FIFO, but that means
1625         * that jumbo frames larger than 8192 bytes will be
1626         * truncated. Disabling all bad frame filtering causes
1627         * the RX FIFO to operate in streaming mode, in which
1628         * case the XMAC will start transferring frames out of the
1629         * RX FIFO as soon as the FIFO threshold is reached.
1630         */
1631        xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1632
1633
1634        /*
1635         * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1636         *      - Enable all bits except 'Octets Rx OK Low CntOv'
1637         *        and 'Octets Rx OK Hi Cnt Ov'.
1638         */
1639        xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1640
1641        /*
1642         * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1643         *      - Enable all bits except 'Octets Tx OK Low CntOv'
1644         *        and 'Octets Tx OK Hi Cnt Ov'.
1645         */
1646        xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1647
1648        /* Configure MAC arbiter */
1649        skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1650
1651        /* configure timeout values */
1652        skge_write8(hw, B3_MA_TOINI_RX1, 72);
1653        skge_write8(hw, B3_MA_TOINI_RX2, 72);
1654        skge_write8(hw, B3_MA_TOINI_TX1, 72);
1655        skge_write8(hw, B3_MA_TOINI_TX2, 72);
1656
1657        skge_write8(hw, B3_MA_RCINI_RX1, 0);
1658        skge_write8(hw, B3_MA_RCINI_RX2, 0);
1659        skge_write8(hw, B3_MA_RCINI_TX1, 0);
1660        skge_write8(hw, B3_MA_RCINI_TX2, 0);
1661
1662        /* Configure Rx MAC FIFO */
1663        skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1664        skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1665        skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1666
1667        /* Configure Tx MAC FIFO */
1668        skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1669        skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1670        skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1671
1672        if (jumbo) {
1673                /* Enable frame flushing if jumbo frames used */
1674                skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
1675        } else {
1676                /* enable timeout timers for normal frames */
1677                skge_write16(hw, B3_PA_CTRL,
1678                             (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1679        }
1680}
1681
1682static void genesis_stop(struct skge_port *skge)
1683{
1684        struct skge_hw *hw = skge->hw;
1685        int port = skge->port;
1686        unsigned retries = 1000;
1687        u16 cmd;
1688
1689        /* Disable Tx and Rx */
1690        cmd = xm_read16(hw, port, XM_MMU_CMD);
1691        cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1692        xm_write16(hw, port, XM_MMU_CMD, cmd);
1693
1694        genesis_reset(hw, port);
1695
1696        /* Clear Tx packet arbiter timeout IRQ */
1697        skge_write16(hw, B3_PA_CTRL,
1698                     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1699
1700        /* Reset the MAC */
1701        skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1702        do {
1703                skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1704                if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
1705                        break;
1706        } while (--retries > 0);
1707
1708        /* For external PHYs there must be special handling */
1709        if (hw->phy_type != SK_PHY_XMAC) {
1710                u32 reg = skge_read32(hw, B2_GP_IO);
1711                if (port == 0) {
1712                        reg |= GP_DIR_0;
1713                        reg &= ~GP_IO_0;
1714                } else {
1715                        reg |= GP_DIR_2;
1716                        reg &= ~GP_IO_2;
1717                }
1718                skge_write32(hw, B2_GP_IO, reg);
1719                skge_read32(hw, B2_GP_IO);
1720        }
1721
1722        xm_write16(hw, port, XM_MMU_CMD,
1723                        xm_read16(hw, port, XM_MMU_CMD)
1724                        & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1725
1726        xm_read16(hw, port, XM_MMU_CMD);
1727}
1728
1729
1730static void genesis_get_stats(struct skge_port *skge, u64 *data)
1731{
1732        struct skge_hw *hw = skge->hw;
1733        int port = skge->port;
1734        int i;
1735        unsigned long timeout = jiffies + HZ;
1736
1737        xm_write16(hw, port,
1738                        XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1739
1740        /* wait for update to complete */
1741        while (xm_read16(hw, port, XM_STAT_CMD)
1742               & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1743                if (time_after(jiffies, timeout))
1744                        break;
1745                udelay(10);
1746        }
1747
1748        /* special case for 64 bit octet counter */
1749        data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1750                | xm_read32(hw, port, XM_TXO_OK_LO);
1751        data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1752                | xm_read32(hw, port, XM_RXO_OK_LO);
1753
1754        for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1755                data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1756}
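    /* The OK-octet counters are 64 bits wide but are read as two 32-bit
     * halves and recombined above; e.g. HI = 0x00000002, LO = 0x80000000
     * gives ((u64)0x2 << 32) | 0x80000000 = 0x280000000 = 10737418240
     * octets.
     */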
1757
1758static void genesis_mac_intr(struct skge_hw *hw, int port)
1759{
1760        struct net_device *dev = hw->dev[port];
1761        struct skge_port *skge = netdev_priv(dev);
1762        u16 status = xm_read16(hw, port, XM_ISRC);
1763
1764        netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1765                     "mac interrupt status 0x%x\n", status);
1766
1767        if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
1768                xm_link_down(hw, port);
1769                mod_timer(&skge->link_timer, jiffies + 1);
1770        }
1771
1772        if (status & XM_IS_TXF_UR) {
1773                xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1774                ++dev->stats.tx_fifo_errors;
1775        }
1776}
1777
1778static void genesis_link_up(struct skge_port *skge)
1779{
1780        struct skge_hw *hw = skge->hw;
1781        int port = skge->port;
1782        u16 cmd, msk;
1783        u32 mode;
1784
1785        cmd = xm_read16(hw, port, XM_MMU_CMD);
1786
1787        /*
1788         * Enabling pause frame reception is required for 1000BT
1789         * because the XMAC is not reset when the link goes down
1790         */
1791        if (skge->flow_status == FLOW_STAT_NONE ||
1792            skge->flow_status == FLOW_STAT_LOC_SEND)
1793                /* Disable Pause Frame Reception */
1794                cmd |= XM_MMU_IGN_PF;
1795        else
1796                /* Enable Pause Frame Reception */
1797                cmd &= ~XM_MMU_IGN_PF;
1798
1799        xm_write16(hw, port, XM_MMU_CMD, cmd);
1800
1801        mode = xm_read32(hw, port, XM_MODE);
1802        if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
1803            skge->flow_status == FLOW_STAT_LOC_SEND) {
1804                /*
1805                 * Configure Pause Frame Generation
1806                 * Use internal and external Pause Frame Generation.
1807                 * Sending pause frames is edge triggered.
1808                 * Send a Pause frame with the maximum pause time if
1809         * internal or external FIFO full condition occurs.
1810                 * Send a zero pause time frame to re-start transmission.
1811                 */
1812                /* XM_PAUSE_DA = '010000C28001' (default) */
1813                /* XM_MAC_PTIME = 0xffff (maximum) */
1814                /* remember this value is defined in big endian (!) */
1815                xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1816
1817                mode |= XM_PAUSE_MODE;
1818                skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1819        } else {
1820                /*
1821                 * Disabling pause frame generation is required for 1000BT
1822                 * because the XMAC is not reset when the link goes down
1823                 */
1824                /* Disable Pause Mode in Mode Register */
1825                mode &= ~XM_PAUSE_MODE;
1826
1827                skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1828        }
1829
1830        xm_write32(hw, port, XM_MODE, mode);
1831
1832        /* Turn on detection of Tx underrun */
1833        msk = xm_read16(hw, port, XM_IMSK);
1834        msk &= ~XM_IS_TXF_UR;
1835        xm_write16(hw, port, XM_IMSK, msk);
1836
1837        xm_read16(hw, port, XM_ISRC);
1838
1839        /* get MMU Command Reg. */
1840        cmd = xm_read16(hw, port, XM_MMU_CMD);
1841        if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1842                cmd |= XM_MMU_GMII_FD;
1843
1844        /*
1845         * Workaround BCOM Errata (#10523) for all BCom Phys
1846         * Enable Power Management after link up
1847         */
1848        if (hw->phy_type == SK_PHY_BCOM) {
1849                xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1850                             xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1851                             & ~PHY_B_AC_DIS_PM);
1852                xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1853        }
1854
1855        /* enable Rx/Tx */
1856        xm_write16(hw, port, XM_MMU_CMD,
1857                        cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1858        skge_link_up(skge);
1859}
1860
1861
1862static inline void bcom_phy_intr(struct skge_port *skge)
1863{
1864        struct skge_hw *hw = skge->hw;
1865        int port = skge->port;
1866        u16 isrc;
1867
1868        isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1869        netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
1870                     "phy interrupt status 0x%x\n", isrc);
1871
1872        if (isrc & PHY_B_IS_PSE)
1873                pr_err("%s: uncorrectable pair swap error\n",
1874                       hw->dev[port]->name);
1875
1876        /* Workaround BCom Errata:
1877         *      enable and disable loopback mode if "NO HCD" occurs.
1878         */
1879        if (isrc & PHY_B_IS_NO_HDCL) {
1880                u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1881                xm_phy_write(hw, port, PHY_BCOM_CTRL,
1882                                  ctrl | PHY_CT_LOOP);
1883                xm_phy_write(hw, port, PHY_BCOM_CTRL,
1884                                  ctrl & ~PHY_CT_LOOP);
1885        }
1886
1887        if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1888                bcom_check_link(hw, port);
1889
1890}
1891
1892static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1893{
1894        int i;
1895
1896        gma_write16(hw, port, GM_SMI_DATA, val);
1897        gma_write16(hw, port, GM_SMI_CTRL,
1898                         GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1899        for (i = 0; i < PHY_RETRIES; i++) {
1900                udelay(1);
1901
1902                if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1903                        return 0;
1904        }
1905
1906        pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
1907        return -EIO;
1908}
1909
1910static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1911{
1912        int i;
1913
1914        gma_write16(hw, port, GM_SMI_CTRL,
1915                         GM_SMI_CT_PHY_AD(hw->phy_addr)
1916                         | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1917
1918        for (i = 0; i < PHY_RETRIES; i++) {
1919                udelay(1);
1920                if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1921                        goto ready;
1922        }
1923
1924        return -ETIMEDOUT;
1925 ready:
1926        *val = gma_read16(hw, port, GM_SMI_DATA);
1927        return 0;
1928}
1929
1930static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1931{
1932        u16 v = 0;
1933        if (__gm_phy_read(hw, port, reg, &v))
1934                pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
1935        return v;
1936}
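    /* GMAC PHY registers are reached indirectly through the SMI (MDIO)
     * control/data register pair: a write loads GM_SMI_DATA before
     * kicking GM_SMI_CTRL, a read polls GM_SMI_CTRL for the read-valid
     * bit, and PHY_RETRIES iterations of udelay(1) bound the wait at
     * roughly one millisecond.
     */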
1937
1938/* Marvell Phy Initialization */
1939static void yukon_init(struct skge_hw *hw, int port)
1940{
1941        struct skge_port *skge = netdev_priv(hw->dev[port]);
1942        u16 ctrl, ct1000, adv;
1943
1944        if (skge->autoneg == AUTONEG_ENABLE) {
1945                u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1946
1947                ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1948                          PHY_M_EC_MAC_S_MSK);
1949                ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1950
1951                ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1952
1953                gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1954        }
1955
1956        ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1957        if (skge->autoneg == AUTONEG_DISABLE)
1958                ctrl &= ~PHY_CT_ANE;
1959
1960        ctrl |= PHY_CT_RESET;
1961        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1962
1963        ctrl = 0;
1964        ct1000 = 0;
1965        adv = PHY_AN_CSMA;
1966
1967        if (skge->autoneg == AUTONEG_ENABLE) {
1968                if (hw->copper) {
1969                        if (skge->advertising & ADVERTISED_1000baseT_Full)
1970                                ct1000 |= PHY_M_1000C_AFD;
1971                        if (skge->advertising & ADVERTISED_1000baseT_Half)
1972                                ct1000 |= PHY_M_1000C_AHD;
1973                        if (skge->advertising & ADVERTISED_100baseT_Full)
1974                                adv |= PHY_M_AN_100_FD;
1975                        if (skge->advertising & ADVERTISED_100baseT_Half)
1976                                adv |= PHY_M_AN_100_HD;
1977                        if (skge->advertising & ADVERTISED_10baseT_Full)
1978                                adv |= PHY_M_AN_10_FD;
1979                        if (skge->advertising & ADVERTISED_10baseT_Half)
1980                                adv |= PHY_M_AN_10_HD;
1981
1982                        /* Set Flow-control capabilities */
1983                        adv |= phy_pause_map[skge->flow_control];
1984                } else {
1985                        if (skge->advertising & ADVERTISED_1000baseT_Full)
1986                                adv |= PHY_M_AN_1000X_AFD;
1987                        if (skge->advertising & ADVERTISED_1000baseT_Half)
1988                                adv |= PHY_M_AN_1000X_AHD;
1989
1990                        adv |= fiber_pause_map[skge->flow_control];
1991                }
1992
1993                /* Restart Auto-negotiation */
1994                ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1995        } else {
1996                /* forced speed/duplex settings */
1997                ct1000 = PHY_M_1000C_MSE;
1998
1999                if (skge->duplex == DUPLEX_FULL)
2000                        ctrl |= PHY_CT_DUP_MD;
2001
2002                switch (skge->speed) {
2003                case SPEED_1000:
2004                        ctrl |= PHY_CT_SP1000;
2005                        break;
2006                case SPEED_100:
2007                        ctrl |= PHY_CT_SP100;
2008                        break;
2009                }
2010
2011                ctrl |= PHY_CT_RESET;
2012        }
2013
2014        gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
2015
2016        gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
2017        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2018
2019        /* Enable phy interrupt on autonegotiation complete (or link up) */
2020        if (skge->autoneg == AUTONEG_ENABLE)
2021                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
2022        else
2023                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2024}
2025
2026static void yukon_reset(struct skge_hw *hw, int port)
2027{
2028        gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
2029        gma_write16(hw, port, GM_MC_ADDR_H1, 0);        /* clear MC hash */
2030        gma_write16(hw, port, GM_MC_ADDR_H2, 0);
2031        gma_write16(hw, port, GM_MC_ADDR_H3, 0);
2032        gma_write16(hw, port, GM_MC_ADDR_H4, 0);
2033
2034        gma_write16(hw, port, GM_RX_CTRL,
2035                         gma_read16(hw, port, GM_RX_CTRL)
2036                         | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2037}
2038
2039/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
2040static int is_yukon_lite_a0(struct skge_hw *hw)
2041{
2042        u32 reg;
2043        int ret;
2044
2045        if (hw->chip_id != CHIP_ID_YUKON)
2046                return 0;
2047
2048        reg = skge_read32(hw, B2_FAR);
2049        skge_write8(hw, B2_FAR + 3, 0xff);
2050        ret = (skge_read8(hw, B2_FAR + 3) != 0);
2051        skge_write32(hw, B2_FAR, reg);
2052        return ret;
2053}
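    /* The check above is a behavioral probe rather than an ID test: it
     * writes 0xff into the top byte of the Flash Address Register
     * (B2_FAR) and sees whether the value sticks.  The assumption is
     * that on a genuine Yukon the byte reads back as zero, while a
     * Yukon-Lite rev. A0 (which misreports its chip_id) latches the
     * write; the original register value is restored either way.
     */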
2054
2055static void yukon_mac_init(struct skge_hw *hw, int port)
2056{
2057        struct skge_port *skge = netdev_priv(hw->dev[port]);
2058        int i;
2059        u32 reg;
2060        const u8 *addr = hw->dev[port]->dev_addr;
2061
2062        /* WA code for COMA mode -- set PHY reset */
2063        if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2064            hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2065                reg = skge_read32(hw, B2_GP_IO);
2066                reg |= GP_DIR_9 | GP_IO_9;
2067                skge_write32(hw, B2_GP_IO, reg);
2068        }
2069
2070        /* hard reset */
2071        skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2072        skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2073
2074        /* WA code for COMA mode -- clear PHY reset */
2075        if (hw->chip_id == CHIP_ID_YUKON_LITE &&
2076            hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
2077                reg = skge_read32(hw, B2_GP_IO);
2078                reg |= GP_DIR_9;
2079                reg &= ~GP_IO_9;
2080                skge_write32(hw, B2_GP_IO, reg);
2081        }
2082
2083        /* Set hardware config mode */
2084        reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
2085                GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
2086        reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
2087
2088        /* Clear GMC reset */
2089        skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
2090        skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
2091        skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
2092
2093        if (skge->autoneg == AUTONEG_DISABLE) {
2094                reg = GM_GPCR_AU_ALL_DIS;
2095                gma_write16(hw, port, GM_GP_CTRL,
2096                                 gma_read16(hw, port, GM_GP_CTRL) | reg);
2097
2098                switch (skge->speed) {
2099                case SPEED_1000:
2100                        reg &= ~GM_GPCR_SPEED_100;
2101                        reg |= GM_GPCR_SPEED_1000;
2102                        break;
2103                case SPEED_100:
2104                        reg &= ~GM_GPCR_SPEED_1000;
2105                        reg |= GM_GPCR_SPEED_100;
2106                        break;
2107                case SPEED_10:
2108                        reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
2109                        break;
2110                }
2111
2112                if (skge->duplex == DUPLEX_FULL)
2113                        reg |= GM_GPCR_DUP_FULL;
2114        } else
2115                reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
2116
2117        switch (skge->flow_control) {
2118        case FLOW_MODE_NONE:
2119                skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2120                reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
2121                break;
2122        case FLOW_MODE_LOC_SEND:
2123                /* disable Rx flow-control */
2124                reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
2125                break;
2126        case FLOW_MODE_SYMMETRIC:
2127        case FLOW_MODE_SYM_OR_REM:
2128                /* enable Tx & Rx flow-control */
2129                break;
2130        }
2131
2132        gma_write16(hw, port, GM_GP_CTRL, reg);
2133        skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
2134
2135        yukon_init(hw, port);
2136
2137        /* MIB clear */
2138        reg = gma_read16(hw, port, GM_PHY_ADDR);
2139        gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
2140
2141        for (i = 0; i < GM_MIB_CNT_SIZE; i++)
2142                gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
2143        gma_write16(hw, port, GM_PHY_ADDR, reg);
2144
2145        /* transmit control */
2146        gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
2147
2148        /* receive control reg: unicast + multicast + no FCS  */
2149        gma_write16(hw, port, GM_RX_CTRL,
2150                         GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
2151
2152        /* transmit flow control */
2153        gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
2154
2155        /* transmit parameter */
2156        gma_write16(hw, port, GM_TX_PARAM,
2157                         TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
2158                         TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
2159                         TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
2160
2161        /* configure the Serial Mode Register */
2162        reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
2163                | GM_SMOD_VLAN_ENA
2164                | IPG_DATA_VAL(IPG_DATA_DEF);
2165
2166        if (hw->dev[port]->mtu > ETH_DATA_LEN)
2167                reg |= GM_SMOD_JUMBO_ENA;
2168
2169        gma_write16(hw, port, GM_SERIAL_MODE, reg);
2170
2171        /* physical address: used for pause frames */
2172        gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
2173        /* virtual address for data */
2174        gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
2175
2176        /* clear the counter overflow interrupt masks */
2177        gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
2178        gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
2179        gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
2180
2181        /* Initialize MAC FIFOs */
2182
2183        /* Configure Rx MAC FIFO */
2184        skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
2185        reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
2186
2187        /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
2188        if (is_yukon_lite_a0(hw))
2189                reg &= ~GMF_RX_F_FL_ON;
2190
2191        skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
2192        skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
2193        /*
2194         * Because pause packet truncation in the GMAC does not work,
2195         * we have to raise the flush threshold to 64 bytes so that
2196         * pause packets are flushed from the Rx FIFO on Yukon-1.
2197         */
2198        skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
2199
2200        /* Configure Tx MAC FIFO */
2201        skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
2202        skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
2203}
2204
2205/* Go into power down mode */
2206static void yukon_suspend(struct skge_hw *hw, int port)
2207{
2208        u16 ctrl;
2209
2210        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
2211        ctrl |= PHY_M_PC_POL_R_DIS;
2212        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
2213
2214        ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2215        ctrl |= PHY_CT_RESET;
2216        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2217
2218        /* switch on IEEE-compatible power-down mode */
2219        ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
2220        ctrl |= PHY_CT_PDOWN;
2221        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
2222}
2223
2224static void yukon_stop(struct skge_port *skge)
2225{
2226        struct skge_hw *hw = skge->hw;
2227        int port = skge->port;
2228
2229        skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
2230        yukon_reset(hw, port);
2231
2232        gma_write16(hw, port, GM_GP_CTRL,
2233                         gma_read16(hw, port, GM_GP_CTRL)
2234                         & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
2235        gma_read16(hw, port, GM_GP_CTRL);
2236
2237        yukon_suspend(hw, port);
2238
2239        /* set GPHY Control reset */
2240        skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
2241        skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
2242}
2243
2244static void yukon_get_stats(struct skge_port *skge, u64 *data)
2245{
2246        struct skge_hw *hw = skge->hw;
2247        int port = skge->port;
2248        int i;
2249
2250        data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2251                | gma_read32(hw, port, GM_TXO_OK_LO);
2252        data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
2253                | gma_read32(hw, port, GM_RXO_OK_LO);
2254
2255        for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
2256                data[i] = gma_read32(hw, port,
2257                                          skge_stats[i].gma_offset);
2258}
2259
2260static void yukon_mac_intr(struct skge_hw *hw, int port)
2261{
2262        struct net_device *dev = hw->dev[port];
2263        struct skge_port *skge = netdev_priv(dev);
2264        u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2265
2266        netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2267                     "mac interrupt status 0x%x\n", status);
2268
2269        if (status & GM_IS_RX_FF_OR) {
2270                ++dev->stats.rx_fifo_errors;
2271                skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2272        }
2273
2274        if (status & GM_IS_TX_FF_UR) {
2275                ++dev->stats.tx_fifo_errors;
2276                skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2277        }
2278
2279}
2280
2281static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
2282{
2283        switch (aux & PHY_M_PS_SPEED_MSK) {
2284        case PHY_M_PS_SPEED_1000:
2285                return SPEED_1000;
2286        case PHY_M_PS_SPEED_100:
2287                return SPEED_100;
2288        default:
2289                return SPEED_10;
2290        }
2291}
2292
2293static void yukon_link_up(struct skge_port *skge)
2294{
2295        struct skge_hw *hw = skge->hw;
2296        int port = skge->port;
2297        u16 reg;
2298
2299        /* Enable Transmit FIFO Underrun interrupt */
2300        skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
2301
2302        reg = gma_read16(hw, port, GM_GP_CTRL);
2303        if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
2304                reg |= GM_GPCR_DUP_FULL;
2305
2306        /* enable Rx/Tx */
2307        reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
2308        gma_write16(hw, port, GM_GP_CTRL, reg);
2309
2310        gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
2311        skge_link_up(skge);
2312}
2313
2314static void yukon_link_down(struct skge_port *skge)
2315{
2316        struct skge_hw *hw = skge->hw;
2317        int port = skge->port;
2318        u16 ctrl;
2319
2320        ctrl = gma_read16(hw, port, GM_GP_CTRL);
2321        ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2322        gma_write16(hw, port, GM_GP_CTRL, ctrl);
2323
2324        if (skge->flow_status == FLOW_STAT_REM_SEND) {
2325                ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
2326                ctrl |= PHY_M_AN_ASP;
2327                /* restore Asymmetric Pause bit */
2328                gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
2329        }
2330
2331        skge_link_down(skge);
2332
2333        yukon_init(hw, port);
2334}
2335
2336static void yukon_phy_intr(struct skge_port *skge)
2337{
2338        struct skge_hw *hw = skge->hw;
2339        int port = skge->port;
2340        const char *reason = NULL;
2341        u16 istatus, phystat;
2342
2343        istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2344        phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2345
2346        netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
2347                     "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
2348
2349        if (istatus & PHY_M_IS_AN_COMPL) {
2350                if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2351                    & PHY_M_AN_RF) {
2352                        reason = "remote fault";
2353                        goto failed;
2354                }
2355
2356                if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2357                        reason = "master/slave fault";
2358                        goto failed;
2359                }
2360
2361                if (!(phystat & PHY_M_PS_SPDUP_RES)) {
2362                        reason = "speed/duplex";
2363                        goto failed;
2364                }
2365
2366                skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
2367                        ? DUPLEX_FULL : DUPLEX_HALF;
2368                skge->speed = yukon_speed(hw, phystat);
2369
2370                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2371                switch (phystat & PHY_M_PS_PAUSE_MSK) {
2372                case PHY_M_PS_PAUSE_MSK:
2373                        skge->flow_status = FLOW_STAT_SYMMETRIC;
2374                        break;
2375                case PHY_M_PS_RX_P_EN:
2376                        skge->flow_status = FLOW_STAT_REM_SEND;
2377                        break;
2378                case PHY_M_PS_TX_P_EN:
2379                        skge->flow_status = FLOW_STAT_LOC_SEND;
2380                        break;
2381                default:
2382                        skge->flow_status = FLOW_STAT_NONE;
2383                }
2384
2385                if (skge->flow_status == FLOW_STAT_NONE ||
2386                    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2387                        skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2388                else
2389                        skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2390                yukon_link_up(skge);
2391                return;
2392        }
2393
2394        if (istatus & PHY_M_IS_LSP_CHANGE)
2395                skge->speed = yukon_speed(hw, phystat);
2396
2397        if (istatus & PHY_M_IS_DUP_CHANGE)
2398                skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2399        if (istatus & PHY_M_IS_LST_CHANGE) {
2400                if (phystat & PHY_M_PS_LINK_UP)
2401                        yukon_link_up(skge);
2402                else
2403                        yukon_link_down(skge);
2404        }
2405        return;
2406 failed:
2407        pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
2408
2409        /* XXX restart autonegotiation? */
2410}
2411
2412static void skge_phy_reset(struct skge_port *skge)
2413{
2414        struct skge_hw *hw = skge->hw;
2415        int port = skge->port;
2416        struct net_device *dev = hw->dev[port];
2417
2418        netif_stop_queue(skge->netdev);
2419        netif_carrier_off(skge->netdev);
2420
2421        spin_lock_bh(&hw->phy_lock);
2422        if (is_genesis(hw)) {
2423                genesis_reset(hw, port);
2424                genesis_mac_init(hw, port);
2425        } else {
2426                yukon_reset(hw, port);
2427                yukon_init(hw, port);
2428        }
2429        spin_unlock_bh(&hw->phy_lock);
2430
2431        skge_set_multicast(dev);
2432}
2433
2434/* Basic MII support */
2435static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2436{
2437        struct mii_ioctl_data *data = if_mii(ifr);
2438        struct skge_port *skge = netdev_priv(dev);
2439        struct skge_hw *hw = skge->hw;
2440        int err = -EOPNOTSUPP;
2441
2442        if (!netif_running(dev))
2443                return -ENODEV; /* Phy still in reset */
2444
2445        switch (cmd) {
2446        case SIOCGMIIPHY:
2447                data->phy_id = hw->phy_addr;
2448
2449                /* fallthru */
2450        case SIOCGMIIREG: {
2451                u16 val = 0;
2452                spin_lock_bh(&hw->phy_lock);
2453
2454                if (is_genesis(hw))
2455                        err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2456                else
2457                        err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
2458                spin_unlock_bh(&hw->phy_lock);
2459                data->val_out = val;
2460                break;
2461        }
2462
2463        case SIOCSMIIREG:
2464                spin_lock_bh(&hw->phy_lock);
2465                if (is_genesis(hw))
2466                        err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2467                                   data->val_in);
2468                else
2469                        err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
2470                                   data->val_in);
2471                spin_unlock_bh(&hw->phy_lock);
2472                break;
2473        }
2474        return err;
2475}
2476
2477static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2478{
2479        u32 end;
2480
2481        start /= 8;
2482        len /= 8;
2483        end = start + len - 1;
2484
2485        skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2486        skge_write32(hw, RB_ADDR(q, RB_START), start);
2487        skge_write32(hw, RB_ADDR(q, RB_WP), start);
2488        skge_write32(hw, RB_ADDR(q, RB_RP), start);
2489        skge_write32(hw, RB_ADDR(q, RB_END), end);
2490
2491        if (q == Q_R1 || q == Q_R2) {
2492                /* Set thresholds on receive queues */
2493                skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2494                             start + (2*len)/3);
2495                skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2496                             start + (len/3));
2497        } else {
2498                /* Enable store & forward on Tx queues because
2499                 * Tx FIFO is only 4K on Genesis and 1K on Yukon
2500                 */
2501                skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2502        }
2503
2504        skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2505}
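    /* RAM buffer registers evidently address memory in 8-byte units,
     * hence the divide by 8.  Example: a 64 KiB chunk at byte offset
     * 0x10000 becomes start = 0x2000 and end = 0x3fff, with the Rx
     * upper/lower thresholds at 2/3 and 1/3 of the queue length.
     */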
2506
2507/* Setup Bus Memory Interface */
2508static void skge_qset(struct skge_port *skge, u16 q,
2509                      const struct skge_element *e)
2510{
2511        struct skge_hw *hw = skge->hw;
2512        u32 watermark = 0x600;
2513        u64 base = skge->dma + (e->desc - skge->mem);
2514
2515        /* optimization to reduce window on 32-bit/33 MHz PCI */
2516        if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2517                watermark /= 2;
2518
2519        skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2520        skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2521        skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2522        skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
2523}
2524
2525static int skge_up(struct net_device *dev)
2526{
2527        struct skge_port *skge = netdev_priv(dev);
2528        struct skge_hw *hw = skge->hw;
2529        int port = skge->port;
2530        u32 chunk, ram_addr;
2531        size_t rx_size, tx_size;
2532        int err;
2533
2534        if (!is_valid_ether_addr(dev->dev_addr))
2535                return -EINVAL;
2536
2537        netif_info(skge, ifup, skge->netdev, "enabling interface\n");
2538
2539        if (dev->mtu > RX_BUF_SIZE)
2540                skge->rx_buf_size = dev->mtu + ETH_HLEN;
2541        else
2542                skge->rx_buf_size = RX_BUF_SIZE;
2543
2544
2545        rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2546        tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2547        skge->mem_size = tx_size + rx_size;
2548        skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
2549        if (!skge->mem)
2550                return -ENOMEM;
2551
2552        BUG_ON(skge->dma & 7);
2553
2554        if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
2555                dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2556                err = -EINVAL;
2557                goto free_pci_mem;
2558        }
2559
2560        memset(skge->mem, 0, skge->mem_size);
2561
2562        err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
2563        if (err)
2564                goto free_pci_mem;
2565
2566        err = skge_rx_fill(dev);
2567        if (err)
2568                goto free_rx_ring;
2569
2570        err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2571                              skge->dma + rx_size);
2572        if (err)
2573                goto free_rx_ring;
2574
2575        if (hw->ports == 1) {
2576                err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
2577                                  dev->name, hw);
2578                if (err) {
2579                        netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
2580                                   hw->pdev->irq, err);
2581                        goto free_tx_ring;
2582                }
2583        }
2584
2585        /* Initialize MAC */
2586        netif_carrier_off(dev);
2587        spin_lock_bh(&hw->phy_lock);
2588        if (is_genesis(hw))
2589                genesis_mac_init(hw, port);
2590        else
2591                yukon_mac_init(hw, port);
2592        spin_unlock_bh(&hw->phy_lock);
2593
2594        /* Configure RAM buffers - split equally between ports and Tx/Rx */
2595        chunk = (hw->ram_size  - hw->ram_offset) / (hw->ports * 2);
2596        ram_addr = hw->ram_offset + 2 * chunk * port;
2597
2598        skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2599        skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2600
2601        BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2602        skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2603        skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2604
2605        /* Start receiver BMU */
2606        wmb();
2607        skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2608        skge_led(skge, LED_MODE_ON);
2609
2610        spin_lock_irq(&hw->hw_lock);
2611        hw->intr_mask |= portmask[port];
2612        skge_write32(hw, B0_IMSK, hw->intr_mask);
2613        skge_read32(hw, B0_IMSK);
2614        spin_unlock_irq(&hw->hw_lock);
2615
2616        napi_enable(&skge->napi);
2617
2618        skge_set_multicast(dev);
2619
2620        return 0;
2621
2622 free_tx_ring:
2623        kfree(skge->tx_ring.start);
2624 free_rx_ring:
2625        skge_rx_clean(skge);
2626        kfree(skge->rx_ring.start);
2627 free_pci_mem:
2628        pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2629        skge->mem = NULL;
2630
2631        return err;
2632}
2633
2634/* stop receiver */
2635static void skge_rx_stop(struct skge_hw *hw, int port)
2636{
2637        skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2638        skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2639                     RB_RST_SET|RB_DIS_OP_MD);
2640        skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2641}
2642
2643static int skge_down(struct net_device *dev)
2644{
2645        struct skge_port *skge = netdev_priv(dev);
2646        struct skge_hw *hw = skge->hw;
2647        int port = skge->port;
2648
2649        if (skge->mem == NULL)
2650                return 0;
2651
2652        netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
2653
2654        netif_tx_disable(dev);
2655
2656        if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
2657                del_timer_sync(&skge->link_timer);
2658
2659        napi_disable(&skge->napi);
2660        netif_carrier_off(dev);
2661
2662        spin_lock_irq(&hw->hw_lock);
2663        hw->intr_mask &= ~portmask[port];
2664        skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
2665        skge_read32(hw, B0_IMSK);
2666        spin_unlock_irq(&hw->hw_lock);
2667
2668        if (hw->ports == 1)
2669                free_irq(hw->pdev->irq, hw);
2670
2671        skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
2672        if (is_genesis(hw))
2673                genesis_stop(skge);
2674        else
2675                yukon_stop(skge);
2676
2677        /* Stop transmitter */
2678        skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2679        skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2680                     RB_RST_SET|RB_DIS_OP_MD);
2681
2682
2683        /* Disable the Force Sync and Alloc bits, stop rate control */
2684        skge_write8(hw, SK_REG(port, TXA_CTRL),
2685                    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2686
2687        /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2688        skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2689        skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2690
2691        /* Reset PCI FIFO */
2692        skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2693        skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2694
2695        /* Reset the RAM Buffer async Tx queue */
2696        skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2697
2698        skge_rx_stop(hw, port);
2699
2700        if (is_genesis(hw)) {
2701                skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2702                skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2703        } else {
2704                skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2705                skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2706        }
2707
2708        skge_led(skge, LED_MODE_OFF);
2709
2710        netif_tx_lock_bh(dev);
2711        skge_tx_clean(dev);
2712        netif_tx_unlock_bh(dev);
2713
2714        skge_rx_clean(skge);
2715
2716        kfree(skge->rx_ring.start);
2717        kfree(skge->tx_ring.start);
2718        pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2719        skge->mem = NULL;
2720        return 0;
2721}
2722
2723static inline int skge_avail(const struct skge_ring *ring)
2724{
2725        smp_mb();
2726        return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2727                + (ring->to_clean - ring->to_use) - 1;
2728}
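    /* One slot is always left unused so that to_use == to_clean means
     * "empty" rather than "full".  With count = 512: equal pointers
     * give 512 + 0 - 1 = 511 free slots; to_use 10 elements past
     * to_clean gives 512 - 10 - 1 = 501; to_clean 10 past to_use gives
     * 0 + 10 - 1 = 9.
     */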
2729
2730static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2731                                   struct net_device *dev)
2732{
2733        struct skge_port *skge = netdev_priv(dev);
2734        struct skge_hw *hw = skge->hw;
2735        struct skge_element *e;
2736        struct skge_tx_desc *td;
2737        int i;
2738        u32 control, len;
2739        dma_addr_t map;
2740
2741        if (skb_padto(skb, ETH_ZLEN))
2742                return NETDEV_TX_OK;
2743
2744        if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
2745                return NETDEV_TX_BUSY;
2746
2747        e = skge->tx_ring.to_use;
2748        td = e->desc;
2749        BUG_ON(td->control & BMU_OWN);
2750        e->skb = skb;
2751        len = skb_headlen(skb);
2752        map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2753        if (pci_dma_mapping_error(hw->pdev, map))
2754                goto mapping_error;
2755
2756        dma_unmap_addr_set(e, mapaddr, map);
2757        dma_unmap_len_set(e, maplen, len);
2758
2759        td->dma_lo = lower_32_bits(map);
2760        td->dma_hi = upper_32_bits(map);
2761
2762        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2763                const int offset = skb_checksum_start_offset(skb);
2764
2765                /* This seems backwards, but it is what the sk98lin
2766                 * does.  Looks like hardware is wrong?
2767                 */
2768                if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
2769                    hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2770                        control = BMU_TCP_CHECK;
2771                else
2772                        control = BMU_UDP_CHECK;
2773
2774                td->csum_offs = 0;
2775                td->csum_start = offset;
2776                td->csum_write = offset + skb->csum_offset;
2777        } else
2778                control = BMU_CHECK;
2779
2780        if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2781                control |= BMU_EOF | BMU_IRQ_EOF;
2782        else {
2783                struct skge_tx_desc *tf = td;
2784
2785                control |= BMU_STFWD;
2786                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2787                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2788
2789                        map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
2790                                               skb_frag_size(frag), DMA_TO_DEVICE);
2791                        if (dma_mapping_error(&hw->pdev->dev, map))
2792                                goto mapping_unwind;
2793
2794                        e = e->next;
2795                        e->skb = skb;
2796                        tf = e->desc;
2797                        BUG_ON(tf->control & BMU_OWN);
2798
2799                        tf->dma_lo = lower_32_bits(map);
2800                        tf->dma_hi = upper_32_bits(map);
2801                        dma_unmap_addr_set(e, mapaddr, map);
2802                        dma_unmap_len_set(e, maplen, skb_frag_size(frag));
2803
2804                        tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
2805                }
2806                tf->control |= BMU_EOF | BMU_IRQ_EOF;
2807        }
2808        /* Make sure all the descriptors are written */
2809        wmb();
2810        td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2811        wmb();
2812
2813        netdev_sent_queue(dev, skb->len);
2814
2815        skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2816
2817        netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
2818                     "tx queued, slot %td, len %d\n",
2819                     e - skge->tx_ring.start, skb->len);
2820
2821        skge->tx_ring.to_use = e->next;
2822        smp_wmb();
2823
2824        if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
2825                netdev_dbg(dev, "transmit queue full\n");
2826                netif_stop_queue(dev);
2827        }
2828
2829        return NETDEV_TX_OK;
2830
2831mapping_unwind:
2832        e = skge->tx_ring.to_use;
2833        pci_unmap_single(hw->pdev,
2834                         dma_unmap_addr(e, mapaddr),
2835                         dma_unmap_len(e, maplen),
2836                         PCI_DMA_TODEVICE);
2837        while (i-- > 0) {
2838                e = e->next;
2839                pci_unmap_page(hw->pdev,
2840                               dma_unmap_addr(e, mapaddr),
2841                               dma_unmap_len(e, maplen),
2842                               PCI_DMA_TODEVICE);
2843        }
2844
2845mapping_error:
2846        if (net_ratelimit())
2847                dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2848        dev_kfree_skb_any(skb);
2849        return NETDEV_TX_OK;
2850}
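    /* Ownership handoff in skge_xmit_frame(): every descriptor in the
     * chain is fully written before the first wmb(), and only then is
     * BMU_OWN set on the head descriptor, so the BMU can never observe
     * a half-built chain when CSR_START kicks the queue.
     */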
2851
2852
2853/* Free resources associated with this ring element */
2854static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
2855                                 u32 control)
2856{
2857        /* skb header vs. fragment */
2858        if (control & BMU_STF)
2859                pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
2860                                 dma_unmap_len(e, maplen),
2861                                 PCI_DMA_TODEVICE);
2862        else
2863                pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
2864                               dma_unmap_len(e, maplen),
2865                               PCI_DMA_TODEVICE);
2866}
2867
2868/* Free all buffers in transmit ring */
2869static void skge_tx_clean(struct net_device *dev)
2870{
2871        struct skge_port *skge = netdev_priv(dev);
2872        struct skge_element *e;
2873
2874        for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
2875                struct skge_tx_desc *td = e->desc;
2876
2877                skge_tx_unmap(skge->hw->pdev, e, td->control);
2878
2879                if (td->control & BMU_EOF)
2880                        dev_kfree_skb(e->skb);
2881                td->control = 0;
2882        }
2883
2884        netdev_reset_queue(dev);
2885        skge->tx_ring.to_clean = e;
2886}
2887
2888static void skge_tx_timeout(struct net_device *dev)
2889{
2890        struct skge_port *skge = netdev_priv(dev);
2891
2892        netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
2893
2894        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2895        skge_tx_clean(dev);
2896        netif_wake_queue(dev);
2897}
2898
2899static int skge_change_mtu(struct net_device *dev, int new_mtu)
2900{
2901        int err;
2902
2903        if (!netif_running(dev)) {
2904                dev->mtu = new_mtu;
2905                return 0;
2906        }
2907
2908        skge_down(dev);
2909
2910        dev->mtu = new_mtu;
2911
2912        err = skge_up(dev);
2913        if (err)
2914                dev_close(dev);
2915
2916        return err;
2917}
2918
2919static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2920
2921static void genesis_add_filter(u8 filter[8], const u8 *addr)
2922{
2923        u32 crc, bit;
2924
2925        crc = ether_crc_le(ETH_ALEN, addr);
2926        bit = ~crc & 0x3f;
2927        filter[bit/8] |= 1 << (bit%8);
2928}
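    /* 64-bin hash filter: the low six bits of the inverted little-endian
     * CRC-32 of the address select one bit of the 64-bit filter, e.g.
     * bit = 0x2a sets bit 2 of filter[5] (0x2a / 8 = 5, 0x2a % 8 = 2).
     * Hash collisions mean false positives are possible; exact matching
     * is left to the upper layers.
     */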
2929
2930static void genesis_set_multicast(struct net_device *dev)
2931{
2932        struct skge_port *skge = netdev_priv(dev);
2933        struct skge_hw *hw = skge->hw;
2934        int port = skge->port;
2935        struct netdev_hw_addr *ha;
2936        u32 mode;
2937        u8 filter[8];
2938
2939        mode = xm_read32(hw, port, XM_MODE);
2940        mode |= XM_MD_ENA_HASH;
2941        if (dev->flags & IFF_PROMISC)
2942                mode |= XM_MD_ENA_PROM;
2943        else
2944                mode &= ~XM_MD_ENA_PROM;
2945
2946        if (dev->flags & IFF_ALLMULTI)
2947                memset(filter, 0xff, sizeof(filter));
2948        else {
2949                memset(filter, 0, sizeof(filter));
2950
2951                if (skge->flow_status == FLOW_STAT_REM_SEND ||
2952                    skge->flow_status == FLOW_STAT_SYMMETRIC)
2953                        genesis_add_filter(filter, pause_mc_addr);
2954
2955                netdev_for_each_mc_addr(ha, dev)
2956                        genesis_add_filter(filter, ha->addr);
2957        }
2958
2959        xm_write32(hw, port, XM_MODE, mode);
2960        xm_outhash(hw, port, XM_HSM, filter);
2961}
2962
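    /* The GMAC hash differs from the XMAC's: the low six bits of the
     * big-endian CRC (not inverted) select the filter bit.
     */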
2963static void yukon_add_filter(u8 filter[8], const u8 *addr)
2964{
2965        u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
2966        filter[bit/8] |= 1 << (bit%8);
2967}
2968
2969static void yukon_set_multicast(struct net_device *dev)
2970{
2971        struct skge_port *skge = netdev_priv(dev);
2972        struct skge_hw *hw = skge->hw;
2973        int port = skge->port;
2974        struct netdev_hw_addr *ha;
2975        int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
2976                        skge->flow_status == FLOW_STAT_SYMMETRIC);
2977        u16 reg;
2978        u8 filter[8];
2979
2980        memset(filter, 0, sizeof(filter));
2981
2982        reg = gma_read16(hw, port, GM_RX_CTRL);
2983        reg |= GM_RXCR_UCF_ENA;
2984
2985        if (dev->flags & IFF_PROMISC)           /* promiscuous */
2986                reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2987        else if (dev->flags & IFF_ALLMULTI)     /* all multicast */
2988                memset(filter, 0xff, sizeof(filter));
2989        else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
2990                reg &= ~GM_RXCR_MCF_ENA;
2991        else {
2992                reg |= GM_RXCR_MCF_ENA;
2993
2994                if (rx_pause)
2995                        yukon_add_filter(filter, pause_mc_addr);
2996
2997                netdev_for_each_mc_addr(ha, dev)
2998                        yukon_add_filter(filter, ha->addr);
2999        }
3000
3002        gma_write16(hw, port, GM_MC_ADDR_H1,
3003                         (u16)filter[0] | ((u16)filter[1] << 8));
3004        gma_write16(hw, port, GM_MC_ADDR_H2,
3005                         (u16)filter[2] | ((u16)filter[3] << 8));
3006        gma_write16(hw, port, GM_MC_ADDR_H3,
3007                         (u16)filter[4] | ((u16)filter[5] << 8));
3008        gma_write16(hw, port, GM_MC_ADDR_H4,
3009                         (u16)filter[6] | ((u16)filter[7] << 8));
3010
3011        gma_write16(hw, port, GM_RX_CTRL, reg);
3012}
3013
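    /* Extract the frame length from the MAC's receive status word; the
     * field sits at a different bit position on XMAC and GMAC.
     */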
3014static inline u16 phy_length(const struct skge_hw *hw, u32 status)
3015{
3016        if (is_genesis(hw))
3017                return status >> XMR_FS_LEN_SHIFT;
3018        else
3019                return status >> GMR_FS_LEN_SHIFT;
3020}
3021
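    /* True if the receive status word reports any MAC-level error */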
3022static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
3023{
3024        if (is_genesis(hw))
3025                return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
3026        else
3027                return (status & GMR_FS_ANY_ERR) ||
3028                        (status & GMR_FS_RX_OK) == 0;
3029}
3030
3031static void skge_set_multicast(struct net_device *dev)
3032{
3033        struct skge_port *skge = netdev_priv(dev);
3034
3035        if (is_genesis(skge->hw))
3036                genesis_set_multicast(dev);
3037        else
3038                yukon_set_multicast(dev);
3040}
3041
3043/* Get receive buffer from descriptor.
3044 * Handles copying of small frames and reallocation failures.
3045 */
3046static struct sk_buff *skge_rx_get(struct net_device *dev,
3047                                   struct skge_element *e,
3048                                   u32 control, u32 status, u16 csum)
3049{
3050        struct skge_port *skge = netdev_priv(dev);
3051        struct sk_buff *skb;
3052        u16 len = control & BMU_BBC;
3053
3054        netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
3055                     "rx slot %td status 0x%x len %d\n",
3056                     e - skge->rx_ring.start, status, len);
3057
3058        if (len > skge->rx_buf_size)
3059                goto error;
3060
3061        if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
3062                goto error;
3063
3064        if (bad_phy_status(skge->hw, status))
3065                goto error;
3066
3067        if (phy_length(skge->hw, status) != len)
3068                goto error;
3069
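            /* Copybreak: copy small frames into a fresh skb so the ring
             * buffer can be reused; for larger frames, hand off the ring
             * skb and map a newly allocated replacement instead.
             */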
3070        if (len < RX_COPY_THRESHOLD) {
3071                skb = netdev_alloc_skb_ip_align(dev, len);
3072                if (!skb)
3073                        goto resubmit;
3074
3075                pci_dma_sync_single_for_cpu(skge->hw->pdev,
3076                                            dma_unmap_addr(e, mapaddr),
3077                                            dma_unmap_len(e, maplen),
3078                                            PCI_DMA_FROMDEVICE);
3079                skb_copy_from_linear_data(e->skb, skb->data, len);
3080                pci_dma_sync_single_for_device(skge->hw->pdev,
3081                                               dma_unmap_addr(e, mapaddr),
3082                                               dma_unmap_len(e, maplen),
3083                                               PCI_DMA_FROMDEVICE);
3084                skge_rx_reuse(e, skge->rx_buf_size);
3085        } else {
3086                struct skge_element ee;
3087                struct sk_buff *nskb;
3088
3089                nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3090                if (!nskb)
3091                        goto resubmit;
3092
3093                ee = *e;
3094
3095                skb = ee.skb;
3096                prefetch(skb->data);
3097
3098                if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
3099                        dev_kfree_skb(nskb);
3100                        goto resubmit;
3101                }
3102
3103                pci_unmap_single(skge->hw->pdev,
3104                                 dma_unmap_addr(&ee, mapaddr),
3105                                 dma_unmap_len(&ee, maplen),
3106                                 PCI_DMA_FROMDEVICE);
3107        }
3108
3109        skb_put(skb, len);
3110
3111        if (dev->features & NETIF_F_RXCSUM) {
3112                skb->csum = csum;
3113                skb->ip_summed = CHECKSUM_COMPLETE;
3114        }
3115
3116        skb->protocol = eth_type_trans(skb, dev);
3117
3118        return skb;
3119error:
3120
3121        netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
3122                     "rx err, slot %td control 0x%x status 0x%x\n",
3123                     e - skge->rx_ring.start, control, status);
3124
3125        if (is_genesis(skge->hw)) {
3126                if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
3127                        dev->stats.rx_length_errors++;
3128                if (status & XMR_FS_FRA_ERR)
3129                        dev->stats.rx_frame_errors++;
3130                if (status & XMR_FS_FCS_ERR)
3131                        dev->stats.rx_crc_errors++;
3132        } else {
3133                if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
3134                        dev->stats.rx_length_errors++;
3135                if (status & GMR_FS_FRAGMENT)
3136                        dev->stats.rx_frame_errors++;
3137                if (status & GMR_FS_CRC_ERR)
3138                        dev->stats.rx_crc_errors++;
3139        }
3140
3141resubmit:
3142        skge_rx_reuse(e, skge->rx_buf_size);
3143        return NULL;
3144}
3145
3146/* Free all buffers in the Tx ring that are no longer owned by the device */
3147static void skge_tx_done(struct net_device *dev)
3148{
3149        struct skge_port *skge = netdev_priv(dev);
3150        struct skge_ring *ring = &skge->tx_ring;
3151        struct skge_element *e;
3152        unsigned int bytes_compl = 0, pkts_compl = 0;
3153
3154        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
3155
3156        for (e = ring->to_clean; e != ring->to_use; e = e->next) {
3157                u32 control = ((const struct skge_tx_desc *) e->desc)->control;
3158
3159                if (control & BMU_OWN)
3160                        break;
3161
3162                skge_tx_unmap(skge->hw->pdev, e, control);
3163
3164                if (control & BMU_EOF) {
3165                        netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
3166                                     "tx done slot %td\n",
3167                                     e - skge->tx_ring.start);
3168
3169                        pkts_compl++;
3170                        bytes_compl += e->skb->len;
3171
3172                        dev_consume_skb_any(e->skb);
3173                }
3174        }
3175        netdev_completed_queue(dev, pkts_compl, bytes_compl);
3176        skge->tx_ring.to_clean = e;
3177
3178        /* Can run lockless until we need to synchronize to restart queue. */
3179        smp_mb();
3180
3181        if (unlikely(netif_queue_stopped(dev) &&
3182                     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
3183                netif_tx_lock(dev);
3184                if (unlikely(netif_queue_stopped(dev) &&
3185                             skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
3186                        netif_wake_queue(dev);
3188                }
3189                netif_tx_unlock(dev);
3190        }
3191}
3192
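    /* NAPI poll: reap completed transmits, receive up to the budget,
     * restart the receiver, and re-enable this port's interrupts once
     * less than a full budget of work remains.
     */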
3193static int skge_poll(struct napi_struct *napi, int to_do)
3194{
3195        struct skge_port *skge = container_of(napi, struct skge_port, napi);
3196        struct net_device *dev = skge->netdev;
3197        struct skge_hw *hw = skge->hw;
3198        struct skge_ring *ring = &skge->rx_ring;
3199        struct skge_element *e;
3200        int work_done = 0;
3201
3202        skge_tx_done(dev);
3203
3204        skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
3205
3206        for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
3207                struct skge_rx_desc *rd = e->desc;
3208                struct sk_buff *skb;
3209                u32 control;
3210
3211                rmb();
3212                control = rd->control;
3213                if (control & BMU_OWN)
3214                        break;
3215
3216                skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
3217                if (likely(skb)) {
3218                        napi_gro_receive(napi, skb);
3219                        ++work_done;
3220                }
3221        }
3222        ring->to_clean = e;
3223
3224        /* restart receiver */
3225        wmb();
3226        skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
3227
3228        if (work_done < to_do) {
3229                unsigned long flags;
3230
3231                napi_gro_flush(napi, false);
3232                spin_lock_irqsave(&hw->hw_lock, flags);
3233                __napi_complete(napi);
3234                hw->intr_mask |= napimask[skge->port];
3235                skge_write32(hw, B0_IMSK, hw->intr_mask);
3236                skge_read32(hw, B0_IMSK);
3237                spin_unlock_irqrestore(&hw->hw_lock, flags);
3238        }
3239
3240        return work_done;
3241}
3242
3243/* Parity errors seem to happen when Genesis is connected to a switch
3244 * with no other ports present. Heartbeat error??
3245 */
3246static void skge_mac_parity(struct skge_hw *hw, int port)
3247{
3248        struct net_device *dev = hw->dev[port];
3249
3250        ++dev->stats.tx_heartbeat_errors;
3251
3252        if (is_genesis(hw))
3253                skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
3254                             MFF_CLR_PERR);
3255        else
3256                /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
3257                skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
3258                            (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
3259                            ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
3260}
3261
3262static void skge_mac_intr(struct skge_hw *hw, int port)
3263{
3264        if (is_genesis(hw))
3265                genesis_mac_intr(hw, port);
3266        else
3267                yukon_mac_intr(hw, port);
3268}
3269
3270/* Handle device specific framing and timeout interrupts */
3271static void skge_error_irq(struct skge_hw *hw)
3272{
3273        struct pci_dev *pdev = hw->pdev;
3274        u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3275
3276        if (is_genesis(hw)) {
3277                /* clear xmac errors */
3278                if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
3279                        skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
3280                if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
3281                        skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
3282        } else {
3283                /* Timestamp (unused) overflow */
3284                if (hwstatus & IS_IRQ_TIST_OV)
3285                        skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3286        }
3287
3288        if (hwstatus & IS_RAM_RD_PAR) {
3289                dev_err(&pdev->dev, "Ram read data parity error\n");
3290                skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
3291        }
3292
3293        if (hwstatus & IS_RAM_WR_PAR) {
3294                dev_err(&pdev->dev, "Ram write data parity error\n");
3295                skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
3296        }
3297
3298        if (hwstatus & IS_M1_PAR_ERR)
3299                skge_mac_parity(hw, 0);
3300
3301        if (hwstatus & IS_M2_PAR_ERR)
3302                skge_mac_parity(hw, 1);
3303
3304        if (hwstatus & IS_R1_PAR_ERR) {
3305                dev_err(&pdev->dev, "%s: receive queue parity error\n",
3306                        hw->dev[0]->name);
3307                skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
3308        }
3309
3310        if (hwstatus & IS_R2_PAR_ERR) {
3311                dev_err(&pdev->dev, "%s: receive queue parity error\n",
3312                        hw->dev[1]->name);
3313                skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
3314        }
3315
3316        if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
3317                u16 pci_status, pci_cmd;
3318
3319                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3320                pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3321
3322                dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
3323                        pci_cmd, pci_status);
3324
3325                /* Write the error bits back to clear them. */
3326                pci_status &= PCI_STATUS_ERROR_BITS;
3327                skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3328                pci_write_config_word(pdev, PCI_COMMAND,
3329                                      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
3330                pci_write_config_word(pdev, PCI_STATUS, pci_status);
3331                skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3332
3333                /* If the error is still set, just ignore it from now on */
3334                hwstatus = skge_read32(hw, B0_HWE_ISRC);
3335                if (hwstatus & IS_IRQ_STAT) {
3336                        dev_warn(&hw->pdev->dev, "unable to clear errors (so ignoring them)\n");
3337                        hw->intr_mask &= ~IS_HW_ERR;
3338                }
3339        }
3340}
3341
3342/*
3343 * Interrupts from the PHY are handled in a tasklet (softirq)
3344 * because accessing PHY registers requires a spin wait, which might
3345 * cause excess interrupt latency.
3346 */
3347static void skge_extirq(unsigned long arg)
3348{
3349        struct skge_hw *hw = (struct skge_hw *) arg;
3350        int port;
3351
3352        for (port = 0; port < hw->ports; port++) {
3353                struct net_device *dev = hw->dev[port];
3354
3355                if (netif_running(dev)) {
3356                        struct skge_port *skge = netdev_priv(dev);
3357
3358                        spin_lock(&hw->phy_lock);
3359                        if (!is_genesis(hw))
3360                                yukon_phy_intr(skge);
3361                        else if (hw->phy_type == SK_PHY_BCOM)
3362                                bcom_phy_intr(skge);
3363                        spin_unlock(&hw->phy_lock);
3364                }
3365        }
3366
3367        spin_lock_irq(&hw->hw_lock);
3368        hw->intr_mask |= IS_EXT_REG;
3369        skge_write32(hw, B0_IMSK, hw->intr_mask);
3370        skge_read32(hw, B0_IMSK);
3371        spin_unlock_irq(&hw->hw_lock);
3372}
3373
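    /* Top-half interrupt handler, shared by both ports. Reading
     * B0_SP_ISRC masks further interrupts; Rx/Tx work is deferred to
     * NAPI and PHY work to the tasklet before the mask is rewritten.
     */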
3374static irqreturn_t skge_intr(int irq, void *dev_id)
3375{
3376        struct skge_hw *hw = dev_id;
3377        u32 status;
3378        int handled = 0;
3379
3380        spin_lock(&hw->hw_lock);
3381        /* Reading this register masks IRQ */
3382        status = skge_read32(hw, B0_SP_ISRC);
3383        if (status == 0 || status == ~0)
3384                goto out;
3385
3386        handled = 1;
3387        status &= hw->intr_mask;
3388        if (status & IS_EXT_REG) {
3389                hw->intr_mask &= ~IS_EXT_REG;
3390                tasklet_schedule(&hw->phy_task);
3391        }
3392
3393        if (status & (IS_XA1_F|IS_R1_F)) {
3394                struct skge_port *skge = netdev_priv(hw->dev[0]);
3395                hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
3396                napi_schedule(&skge->napi);
3397        }
3398
3399        if (status & IS_PA_TO_TX1)
3400                skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
3401
3402        if (status & IS_PA_TO_RX1) {
3403                ++hw->dev[0]->stats.rx_over_errors;
3404                skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
3405        }
3406
3408        if (status & IS_MAC1)
3409                skge_mac_intr(hw, 0);
3410
3411        if (hw->dev[1]) {
3412                struct skge_port *skge = netdev_priv(hw->dev[1]);
3413
3414                if (status & (IS_XA2_F|IS_R2_F)) {
3415                        hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
3416                        napi_schedule(&skge->napi);
3417                }
3418
3419                if (status & IS_PA_TO_RX2) {
3420                        ++hw->dev[1]->stats.rx_over_errors;
3421                        skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
3422                }
3423
3424                if (status & IS_PA_TO_TX2)
3425                        skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
3426
3427                if (status & IS_MAC2)
3428                        skge_mac_intr(hw, 1);
3429        }
3430
3431        if (status & IS_HW_ERR)
3432                skge_error_irq(hw);
3433out:
3434        skge_write32(hw, B0_IMSK, hw->intr_mask);
3435        skge_read32(hw, B0_IMSK);
3436        spin_unlock(&hw->hw_lock);
3437
3438        return IRQ_RETVAL(handled);
3439}
3440
3441#ifdef CONFIG_NET_POLL_CONTROLLER
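    /* Polled "interrupt" for netconsole and similar users: run the
     * interrupt handler with the IRQ line disabled.
     */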
3442static void skge_netpoll(struct net_device *dev)
3443{
3444        struct skge_port *skge = netdev_priv(dev);
3445
3446        disable_irq(dev->irq);
3447        skge_intr(dev->irq, skge->hw);
3448        enable_irq(dev->irq);
3449}
3450#endif
3451
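    /* Program a new station address. On a running port the receiver is
     * disabled while the address registers are rewritten.
     */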
3452static int skge_set_mac_address(struct net_device *dev, void *p)
3453{
3454        struct skge_port *skge = netdev_priv(dev);
3455        struct skge_hw *hw = skge->hw;
3456        unsigned port = skge->port;
3457        const struct sockaddr *addr = p;
3458        u16 ctrl;
3459
3460        if (!is_valid_ether_addr(addr->sa_data))
3461                return -EADDRNOTAVAIL;
3462
3463        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3464
3465        if (!netif_running(dev)) {
3466                memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
3467                memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
3468        } else {
3469                /* disable Rx */
3470                spin_lock_bh(&hw->phy_lock);
3471                ctrl = gma_read16(hw, port, GM_GP_CTRL);
3472                gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
3473
3474                memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
3475                memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
3476
3477                if (is_genesis(hw))
3478                        xm_outaddr(hw, port, XM_SA, dev->dev_addr);
3479                else {
3480                        gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
3481                        gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3482                }
3483
3484                gma_write16(hw, port, GM_GP_CTRL, ctrl);
3485                spin_unlock_bh(&hw->phy_lock);
3486        }
3487
3488        return 0;
3489}
3490
3491static const struct {
3492        u8 id;
3493        const char *name;
3494} skge_chips[] = {
3495        { CHIP_ID_GENESIS,      "Genesis" },
3496        { CHIP_ID_YUKON,         "Yukon" },
3497        { CHIP_ID_YUKON_LITE,    "Yukon-Lite"},
3498        { CHIP_ID_YUKON_LP,      "Yukon-LP"},
3499};
3500
3501static const char *skge_board_name(const struct skge_hw *hw)
3502{
3503        int i;
3504        static char buf[16];
3505
3506        for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
3507                if (skge_chips[i].id == hw->chip_id)
3508                        return skge_chips[i].name;
3509
3510        snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id);
3511        return buf;
3512}
3513
3515/*
3516 * Setup the board data structure, but don't bring up
3517 * the port(s)
3518 */
3519static int skge_reset(struct skge_hw *hw)
3520{
3521        u32 reg;
3522        u16 ctst, pci_status;
3523        u8 t8, mac_cfg, pmd_type;
3524        int i;
3525
3526        ctst = skge_read16(hw, B0_CTST);
3527
3528        /* do a SW reset */
3529        skge_write8(hw, B0_CTST, CS_RST_SET);
3530        skge_write8(hw, B0_CTST, CS_RST_CLR);
3531
3532        /* clear PCI errors, if any */
3533        skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3534        skge_write8(hw, B2_TST_CTRL2, 0);
3535
3536        pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3537        pci_write_config_word(hw->pdev, PCI_STATUS,
3538                              pci_status | PCI_STATUS_ERROR_BITS);
3539        skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3540        skge_write8(hw, B0_CTST, CS_MRST_CLR);
3541
3542        /* restore CLK_RUN bits (for Yukon-Lite) */
3543        skge_write16(hw, B0_CTST,
3544                     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
3545
3546        hw->chip_id = skge_read8(hw, B2_CHIP_ID);
3547        hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
3548        pmd_type = skge_read8(hw, B2_PMD_TYP);
3549        hw->copper = (pmd_type == 'T' || pmd_type == '1');
3550
3551        switch (hw->chip_id) {
3552        case CHIP_ID_GENESIS:
3553#ifdef CONFIG_SKGE_GENESIS
3554                switch (hw->phy_type) {
3555                case SK_PHY_XMAC:
3556                        hw->phy_addr = PHY_ADDR_XMAC;
3557                        break;
3558                case SK_PHY_BCOM:
3559                        hw->phy_addr = PHY_ADDR_BCOM;
3560                        break;
3561                default:
3562                        dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
3563                               hw->phy_type);
3564                        return -EOPNOTSUPP;
3565                }
3566                break;
3567#else
3568                dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
3569                return -EOPNOTSUPP;
3570#endif
3571
3572        case CHIP_ID_YUKON:
3573        case CHIP_ID_YUKON_LITE:
3574        case CHIP_ID_YUKON_LP:
3575                if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
3576                        hw->copper = 1;
3577
3578                hw->phy_addr = PHY_ADDR_MARV;
3579                break;
3580
3581        default:
3582                dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3583                       hw->chip_id);
3584                return -EOPNOTSUPP;
3585        }
3586
3587        mac_cfg = skge_read8(hw, B2_MAC_CFG);
3588        hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
3589        hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
3590
3591        /* read the adapter's RAM size */
3592        t8 = skge_read8(hw, B2_E_0);
3593        if (is_genesis(hw)) {
3594                if (t8 == 3) {
3595                        /* special case: 4 x 64k x 36, offset = 0x80000 */
3596                        hw->ram_size = 0x100000;
3597                        hw->ram_offset = 0x80000;
3598                } else
3599                        hw->ram_size = t8 * 512;
3600        } else if (t8 == 0)
3601                hw->ram_size = 0x20000;
3602        else
3603                hw->ram_size = t8 * 4096;
3604
3605        hw->intr_mask = IS_HW_ERR;
3606
3607        /* Use the PHY IRQ for all but fiber-based Genesis boards */
3608        if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
3609                hw->intr_mask |= IS_EXT_REG;
3610
3611        if (is_genesis(hw))
3612                genesis_init(hw);
3613        else {
3614                /* switch power to VCC (workaround for VAUX problem) */
3615                skge_write8(hw, B0_POWER_CTRL,
3616                            PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3617
3618                /* avoid boards with stuck Hardware error bits */
3619                if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3620                    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3621                        dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
3622                        hw->intr_mask &= ~IS_HW_ERR;
3623                }
3624
3625                /* Clear PHY COMA */
3626                skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3627                pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
3628                reg &= ~PCI_PHY_COMA;
3629                pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
3630                skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3631
3633                for (i = 0; i < hw->ports; i++) {
3634                        skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3635                        skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3636                }
3637        }
3638
3639        /* turn off hardware timer (unused) */
3640        skge_write8(hw, B2_TI_CTRL, TIM_STOP);
3641        skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3642        skge_write8(hw, B0_LED, LED_STAT_ON);
3643
3644        /* enable the Tx Arbiters */
3645        for (i = 0; i < hw->ports; i++)
3646                skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3647
3648        /* Initialize RAM interface */
3649        skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
3650
3651        skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
3652        skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
3653        skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
3654        skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
3655        skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
3656        skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
3657        skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
3658        skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
3659        skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
3660        skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
3661        skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
3662        skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
3663
3664        skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
3665
3666        /* Set interrupt moderation for transmit only;
3667         * receive interrupts are avoided by NAPI.
3668         */
3669        skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
3670        skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3671        skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3672
3673        /* Leave irq disabled until first port is brought up. */
3674        skge_write32(hw, B0_IMSK, 0);
3675
3676        for (i = 0; i < hw->ports; i++) {
3677                if (is_genesis(hw))
3678                        genesis_reset(hw, i);
3679                else
3680                        yukon_reset(hw, i);
3681        }
3682
3683        return 0;
3684}
3685
3687#ifdef CONFIG_SKGE_DEBUG
3688
3689static struct dentry *skge_debug;
3690
3691static int skge_debug_show(struct seq_file *seq, void *v)
3692{
3693        struct net_device *dev = seq->private;
3694        const struct skge_port *skge = netdev_priv(dev);
3695        const struct skge_hw *hw = skge->hw;
3696        const struct skge_element *e;
3697
3698        if (!netif_running(dev))
3699                return -ENETDOWN;
3700
3701        seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
3702                   skge_read32(hw, B0_IMSK));
3703
3704        seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
3705        for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
3706                const struct skge_tx_desc *t = e->desc;
3707                seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
3708                           t->control, t->dma_hi, t->dma_lo, t->status,
3709                           t->csum_offs, t->csum_write, t->csum_start);
3710        }
3711
3712        seq_printf(seq, "\nRx Ring:\n");
3713        for (e = skge->rx_ring.to_clean; ; e = e->next) {
3714                const struct skge_rx_desc *r = e->desc;
3715
3716                if (r->control & BMU_OWN)
3717                        break;
3718
3719                seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
3720                           r->control, r->dma_hi, r->dma_lo, r->status,
3721                           r->timestamp, r->csum1, r->csum1_start);
3722        }
3723
3724        return 0;
3725}
3726
3727static int skge_debug_open(struct inode *inode, struct file *file)
3728{
3729        return single_open(file, skge_debug_show, inode->i_private);
3730}
3731
3732static const struct file_operations skge_debug_fops = {
3733        .owner          = THIS_MODULE,
3734        .open           = skge_debug_open,
3735        .read           = seq_read,
3736        .llseek         = seq_lseek,
3737        .release        = single_release,
3738};
3739
3740/*
3741 * Use network device events to create/remove/rename
3742 * debugfs file entries
3743 */
3744static int skge_device_event(struct notifier_block *unused,
3745                             unsigned long event, void *ptr)
3746{
3747        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3748        struct skge_port *skge;
3749        struct dentry *d;
3750
3751        if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
3752                goto done;
3753
3754        skge = netdev_priv(dev);
3755        switch (event) {
3756        case NETDEV_CHANGENAME:
3757                if (skge->debugfs) {
3758                        d = debugfs_rename(skge_debug, skge->debugfs,
3759                                           skge_debug, dev->name);
3760                        if (d)
3761                                skge->debugfs = d;
3762                        else {
3763                                netdev_info(dev, "rename failed\n");
3764                                debugfs_remove(skge->debugfs);
3765                        }
3766                }
3767                break;
3768
3769        case NETDEV_GOING_DOWN:
3770                if (skge->debugfs) {
3771                        debugfs_remove(skge->debugfs);
3772                        skge->debugfs = NULL;
3773                }
3774                break;
3775
3776        case NETDEV_UP:
3777                d = debugfs_create_file(dev->name, S_IRUGO,
3778                                        skge_debug, dev,
3779                                        &skge_debug_fops);
3780                if (!d || IS_ERR(d))
3781                        netdev_info(dev, "debugfs create failed\n");
3782                else
3783                        skge->debugfs = d;
3784                break;
3785        }
3786
3787done:
3788        return NOTIFY_DONE;
3789}
3790
3791static struct notifier_block skge_notifier = {
3792        .notifier_call = skge_device_event,
3793};
3794
3796static __init void skge_debug_init(void)
3797{
3798        struct dentry *ent;
3799
3800        ent = debugfs_create_dir("skge", NULL);
3801        if (!ent || IS_ERR(ent)) {
3802                pr_info("debugfs create directory failed\n");
3803                return;
3804        }
3805
3806        skge_debug = ent;
3807        register_netdevice_notifier(&skge_notifier);
3808}
3809
3810static __exit void skge_debug_cleanup(void)
3811{
3812        if (skge_debug) {
3813                unregister_netdevice_notifier(&skge_notifier);
3814                debugfs_remove(skge_debug);
3815                skge_debug = NULL;
3816        }
3817}
3818
3819#else
3820#define skge_debug_init()
3821#define skge_debug_cleanup()
3822#endif
3823
3824static const struct net_device_ops skge_netdev_ops = {
3825        .ndo_open               = skge_up,
3826        .ndo_stop               = skge_down,
3827        .ndo_start_xmit         = skge_xmit_frame,
3828        .ndo_do_ioctl           = skge_ioctl,
3829        .ndo_get_stats          = skge_get_stats,
3830        .ndo_tx_timeout         = skge_tx_timeout,
3831        .ndo_change_mtu         = skge_change_mtu,
3832        .ndo_validate_addr      = eth_validate_addr,
3833        .ndo_set_rx_mode        = skge_set_multicast,
3834        .ndo_set_mac_address    = skge_set_mac_address,
3835#ifdef CONFIG_NET_POLL_CONTROLLER
3836        .ndo_poll_controller    = skge_netpoll,
3837#endif
3838};
3839
3841/* Initialize network device */
3842static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3843                                       int highmem)
3844{
3845        struct skge_port *skge;
3846        struct net_device *dev = alloc_etherdev(sizeof(*skge));
3847
3848        if (!dev)
3849                return NULL;
3850
3851        SET_NETDEV_DEV(dev, &hw->pdev->dev);
3852        dev->netdev_ops = &skge_netdev_ops;
3853        dev->ethtool_ops = &skge_ethtool_ops;
3854        dev->watchdog_timeo = TX_WATCHDOG;
3855        dev->irq = hw->pdev->irq;
3856
3857        /* MTU range: 60 - 9000 */
3858        dev->min_mtu = ETH_ZLEN;
3859        dev->max_mtu = ETH_JUMBO_MTU;
3860
3861        if (highmem)
3862                dev->features |= NETIF_F_HIGHDMA;
3863
3864        skge = netdev_priv(dev);
3865        netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
3866        skge->netdev = dev;
3867        skge->hw = hw;
3868        skge->msg_enable = netif_msg_init(debug, default_msg);
3869
3870        skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
3871        skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
3872
3873        /* Auto speed and flow control */
3874        skge->autoneg = AUTONEG_ENABLE;
3875        skge->flow_control = FLOW_MODE_SYM_OR_REM;
3876        skge->duplex = -1;
3877        skge->speed = -1;
3878        skge->advertising = skge_supported_modes(hw);
3879
3880        if (device_can_wakeup(&hw->pdev->dev)) {
3881                skge->wol = wol_supported(hw) & WAKE_MAGIC;
3882                device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
3883        }
3884
3885        hw->dev[port] = dev;
3886
3887        skge->port = port;
3888
3889        /* Only used for Genesis XMAC */
3890        if (is_genesis(hw))
3891            setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
3892        else {
3893                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3894                                   NETIF_F_RXCSUM;
3895                dev->features |= dev->hw_features;
3896        }
3897
3898        /* read the MAC address */
3899        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3900
3901        return dev;
3902}
3903
3904static void skge_show_addr(struct net_device *dev)
3905{
3906        const struct skge_port *skge = netdev_priv(dev);
3907
3908        netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3909}
3910
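    /* Set at module load when DMI matches a board with broken 64-bit DMA
     * (see skge_32bit_dma_boards below).
     */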
3911static int only_32bit_dma;
3912
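    /* Probe one adapter: map its registers, reset the chip, then
     * allocate and register a net_device per port. The IRQ is requested
     * here only for dual-port boards; single-port boards request it when
     * the port is brought up.
     */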
3913static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3914{
3915        struct net_device *dev, *dev1;
3916        struct skge_hw *hw;
3917        int err, using_dac = 0;
3918
3919        err = pci_enable_device(pdev);
3920        if (err) {
3921                dev_err(&pdev->dev, "cannot enable PCI device\n");
3922                goto err_out;
3923        }
3924
3925        err = pci_request_regions(pdev, DRV_NAME);
3926        if (err) {
3927                dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3928                goto err_out_disable_pdev;
3929        }
3930
3931        pci_set_master(pdev);
3932
3933        if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3934                using_dac = 1;
3935                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3936        } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3937                using_dac = 0;
3938                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3939        }
3940
3941        if (err) {
3942                dev_err(&pdev->dev, "no usable DMA configuration\n");
3943                goto err_out_free_regions;
3944        }
3945
3946#ifdef __BIG_ENDIAN
3947        /* byte swap descriptors in hardware */
3948        {
3949                u32 reg;
3950
3951                pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3952                reg |= PCI_REV_DESC;
3953                pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3954        }
3955#endif
3956
3957        err = -ENOMEM;
3958        /* space for skge@pci:0000:04:00.0 */
3959        hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
3960                     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3961        if (!hw)
3962                goto err_out_free_regions;
3963
3964        sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3965
3966        hw->pdev = pdev;
3967        spin_lock_init(&hw->hw_lock);
3968        spin_lock_init(&hw->phy_lock);
3969        tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
3970
3971        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3972        if (!hw->regs) {
3973                dev_err(&pdev->dev, "cannot map device registers\n");
3974                goto err_out_free_hw;
3975        }
3976
3977        err = skge_reset(hw);
3978        if (err)
3979                goto err_out_iounmap;
3980
3981        pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
3982                DRV_VERSION,
3983                (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
3984                skge_board_name(hw), hw->chip_rev);
3985
3986        dev = skge_devinit(hw, 0, using_dac);
3987        if (!dev) {
3988                err = -ENOMEM;
3989                goto err_out_led_off;
3990        }
3991
3992        /* Some motherboards are broken and have an all-zero address in ROM. */
3993        if (!is_valid_ether_addr(dev->dev_addr))
3994                dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
3995
3996        err = register_netdev(dev);
3997        if (err) {
3998                dev_err(&pdev->dev, "cannot register net device\n");
3999                goto err_out_free_netdev;
4000        }
4001
4002        skge_show_addr(dev);
4003
4004        if (hw->ports > 1) {
4005                dev1 = skge_devinit(hw, 1, using_dac);
4006                if (!dev1) {
4007                        err = -ENOMEM;
4008                        goto err_out_unregister;
4009                }
4010
4011                err = register_netdev(dev1);
4012                if (err) {
4013                        dev_err(&pdev->dev, "cannot register second net device\n");
4014                        goto err_out_free_dev1;
4015                }
4016
4017                err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
4018                                  hw->irq_name, hw);
4019                if (err) {
4020                        dev_err(&pdev->dev, "cannot assign irq %d\n",
4021                                pdev->irq);
4022                        goto err_out_unregister_dev1;
4023                }
4024
4025                skge_show_addr(dev1);
4026        }
4027        pci_set_drvdata(pdev, hw);
4028
4029        return 0;
4030
4031err_out_unregister_dev1:
4032        unregister_netdev(dev1);
4033err_out_free_dev1:
4034        free_netdev(dev1);
4035err_out_unregister:
4036        unregister_netdev(dev);
4037err_out_free_netdev:
4038        free_netdev(dev);
4039err_out_led_off:
4040        skge_write16(hw, B0_LED, LED_STAT_OFF);
4041err_out_iounmap:
4042        iounmap(hw->regs);
4043err_out_free_hw:
4044        kfree(hw);
4045err_out_free_regions:
4046        pci_release_regions(pdev);
4047err_out_disable_pdev:
4048        pci_disable_device(pdev);
4049err_out:
4050        return err;
4051}
4052
4053static void skge_remove(struct pci_dev *pdev)
4054{
4055        struct skge_hw *hw  = pci_get_drvdata(pdev);
4056        struct net_device *dev0, *dev1;
4057
4058        if (!hw)
4059                return;
4060
4061        dev1 = hw->dev[1];
4062        if (dev1)
4063                unregister_netdev(dev1);
4064        dev0 = hw->dev[0];
4065        unregister_netdev(dev0);
4066
4067        tasklet_kill(&hw->phy_task);
4068
4069        spin_lock_irq(&hw->hw_lock);
4070        hw->intr_mask = 0;
4071
4072        if (hw->ports > 1) {
4073                skge_write32(hw, B0_IMSK, 0);
4074                skge_read32(hw, B0_IMSK);
4076        }
4077        spin_unlock_irq(&hw->hw_lock);
4078
4079        skge_write16(hw, B0_LED, LED_STAT_OFF);
4080        skge_write8(hw, B0_CTST, CS_RST_SET);
4081
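            /* free_irq() can sleep, so it must be called after hw_lock is
             * dropped; single-port boards release the IRQ when the port
             * is brought down.
             */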
4082        if (hw->ports > 1)
4083                free_irq(pdev->irq, hw);
4084        pci_release_regions(pdev);
4085        pci_disable_device(pdev);
4086        if (dev1)
4087                free_netdev(dev1);
4088        free_netdev(dev0);
4089
4090        iounmap(hw->regs);
4091        kfree(hw);
4092}
4093
4094#ifdef CONFIG_PM_SLEEP
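    /* Suspend: bring running ports down and, where Wake-on-LAN is
     * enabled, arm the wake-up logic before interrupts are masked.
     */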
4095static int skge_suspend(struct device *dev)
4096{
4097        struct pci_dev *pdev = to_pci_dev(dev);
4098        struct skge_hw *hw  = pci_get_drvdata(pdev);
4099        int i;
4100
4101        if (!hw)
4102                return 0;
4103
4104        for (i = 0; i < hw->ports; i++) {
4105                struct net_device *dev = hw->dev[i];
4106                struct skge_port *skge = netdev_priv(dev);
4107
4108                if (netif_running(dev))
4109                        skge_down(dev);
4110
4111                if (skge->wol)
4112                        skge_wol_init(skge);
4113        }
4114
4115        skge_write32(hw, B0_IMSK, 0);
4116
4117        return 0;
4118}
4119
4120static int skge_resume(struct device *dev)
4121{
4122        struct pci_dev *pdev = to_pci_dev(dev);
4123        struct skge_hw *hw  = pci_get_drvdata(pdev);
4124        int i, err;
4125
4126        if (!hw)
4127                return 0;
4128
4129        err = skge_reset(hw);
4130        if (err)
4131                goto out;
4132
4133        for (i = 0; i < hw->ports; i++) {
4134                struct net_device *dev = hw->dev[i];
4135
4136                if (netif_running(dev)) {
4137                        err = skge_up(dev);
4138
4139                        if (err) {
4140                                netdev_err(dev, "could not up: %d\n", err);
4141                                dev_close(dev);
4142                                goto out;
4143                        }
4144                }
4145        }
4146out:
4147        return err;
4148}
4149
4150static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
4151#define SKGE_PM_OPS (&skge_pm_ops)
4152
4153#else
4154
4155#define SKGE_PM_OPS NULL
4156#endif /* CONFIG_PM_SLEEP */
4157
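    /* Shutdown mirrors suspend: arm Wake-on-LAN where enabled, then put
     * the device into D3hot.
     */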
4158static void skge_shutdown(struct pci_dev *pdev)
4159{
4160        struct skge_hw *hw  = pci_get_drvdata(pdev);
4161        int i;
4162
4163        if (!hw)
4164                return;
4165
4166        for (i = 0; i < hw->ports; i++) {
4167                struct net_device *dev = hw->dev[i];
4168                struct skge_port *skge = netdev_priv(dev);
4169
4170                if (skge->wol)
4171                        skge_wol_init(skge);
4172        }
4173
4174        pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
4175        pci_set_power_state(pdev, PCI_D3hot);
4176}
4177
4178static struct pci_driver skge_driver = {
4179        .name =         DRV_NAME,
4180        .id_table =     skge_id_table,
4181        .probe =        skge_probe,
4182        .remove =       skge_remove,
4183        .shutdown =     skge_shutdown,
4184        .driver.pm =    SKGE_PM_OPS,
4185};
4186
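    /* Boards whose chipsets mishandle 64-bit DMA with this adapter;
     * matching any entry forces 32-bit DMA masks in skge_probe().
     */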
4187static struct dmi_system_id skge_32bit_dma_boards[] = {
4188        {
4189                .ident = "Gigabyte nForce boards",
4190                .matches = {
4191                        DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
4192                        DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4193                },
4194        },
4195        {
4196                .ident = "ASUS P5NSLI",
4197                .matches = {
4198                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4199                        DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
4200                },
4201        },
4202        {
4203                .ident = "FUJITSU SIEMENS A8NE-FM",
4204                .matches = {
4205                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
4206                        DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
4207                },
4208        },
4209        {}
4210};
4211
4212static int __init skge_init_module(void)
4213{
4214        if (dmi_check_system(skge_32bit_dma_boards))
4215                only_32bit_dma = 1;
4216        skge_debug_init();
4217        return pci_register_driver(&skge_driver);
4218}
4219
4220static void __exit skge_cleanup_module(void)
4221{
4222        pci_unregister_driver(&skge_driver);
4223        skge_debug_cleanup();
4224}
4225
4226module_init(skge_init_module);
4227module_exit(skge_cleanup_module);
4228