linux/drivers/net/ipg.c
/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007  IC Plus Corp
 *
 * Original Author:
 *
 *   Craig Rich
 *   Sundance Technology, Inc.
 *   www.sundanceti.com
 *   craig_rich@sundanceti.com
 *
 * Current Maintainer:
 *
 *   Sorbica Shieh.
 *   http://www.icplus.com.tw
 *   sorbica@icplus.com.tw
 *
 *   Jesse Huang
 *   http://www.icplus.com.tw
 *   jesse@icplus.com.tw
 */
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>

#define IPG_RX_RING_BYTES       (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES       (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
        (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
         IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
         IPG_AC_AUTO_INIT)

#define ipg_w32(val32, reg)     iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)     iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)       iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)            ioread32(ioaddr + (reg))
#define ipg_r16(reg)            ioread16(ioaddr + (reg))
#define ipg_r8(reg)             ioread8(ioaddr + (reg))

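/*
 * The register accessors above deliberately expand against a local
 * variable named 'ioaddr', so every caller must first bring the mapped
 * register base into scope. A minimal sketch of the pattern used
 * throughout this file (the function name below is illustrative only):
 *
 *      void example_touch_mac_ctrl(struct net_device *dev)
 *      {
 *              void __iomem *ioaddr = ipg_ioaddr(dev);
 *
 *              ipg_w32(ipg_r32(MAC_CTRL), MAC_CTRL);
 *      }
 */
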
enum {
        netdev_io_size = 128
};

#include "ipg.h"
#define DRV_NAME        "ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
MODULE_LICENSE("GPL");

/*
 * Defaults
 */
#define IPG_MAX_RXFRAME_SIZE    0x0600
#define IPG_RXFRAG_SIZE         0x0600
#define IPG_RXSUPPORT_SIZE      0x0600
#define IPG_IS_JUMBO            false

/*
 * Variable record -- indexed by leading revision/length.
 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
 */
static unsigned short DefaultPhyParam[] = {
        /* 11/12/03 IP1000A v1-3 rev=0x40 */
        /*--------------------------------------------------------------------------
        (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
                                 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
                                 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7,  9, 0x0700,
          --------------------------------------------------------------------------*/
        /* 12/17/03 IP1000A v1-4 rev=0x40 */
        (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
        31, 0x0000, 30, 0x005e, 9, 0x0700,
        /* 01/09/04 IP1000A v1-5 rev=0x41 */
        (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
        31, 0x0000, 30, 0x005e, 9, 0x0700,
        0x0000
};
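
/*
 * How the record format above is consumed (illustrative sketch only,
 * guarded out of the build): each record begins with a header word whose
 * high byte is the PHY revision it applies to and whose low byte is the
 * body length in bytes (N register/value pairs of u16 each, hence N*4).
 * The walker below is hypothetical; mdio_write() is defined later in
 * this file.
 */
#if 0
static void example_apply_phy_param(struct net_device *dev, int phy_address,
                                    u8 rev)
{
        unsigned short *p = &DefaultPhyParam[0];

        while (*p != 0x0000) {
                unsigned short header = *p++;
                unsigned short length = header & 0x00ff;        /* N*4 bytes */

                if ((header >> 8) == rev) {
                        while (length >= 4) {
                                unsigned short address = *p++;
                                unsigned short value = *p++;

                                mdio_write(dev, phy_address, address, value);
                                length -= 4;    /* one u16 pair == 4 bytes */
                        }
                } else {
                        p += length / 2;        /* skip this record's body */
                }
        }
}
#endif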

static const char *ipg_brand_name[] = {
        "IC PLUS IP1000 1000/100/10 based NIC",
        "Sundance Technology ST2021 based NIC",
        "Tamarack Microelectronics TC9020/9021 based NIC",
        "D-Link NIC IP1000A"
};

static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
        { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
        { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
        { PCI_VDEVICE(DLINK,    0x9021), 2 },
        { PCI_VDEVICE(DLINK,    0x4020), 3 },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);

static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        return sp->ioaddr;
}

#ifdef IPG_DEBUG
static void ipg_dump_rfdlist(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;
        u32 offset;

        IPG_DEBUG_MSG("_dump_rfdlist\n");

        printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
        printk(KERN_INFO "rx_dirty   = %2.2x\n", sp->rx_dirty);
        printk(KERN_INFO "RFDList start address = %16.16lx\n",
               (unsigned long) sp->rxd_map);
        printk(KERN_INFO "RFDListPtr register   = %8.8x%8.8x\n",
               ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

        for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
                /* Compute byte offsets via uintptr_t so the pointer
                 * arithmetic is also valid on 64 bit builds.
                 */
                offset = (u32)((uintptr_t) &sp->rxd[i].next_desc -
                               (uintptr_t) sp->rxd);
                printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
                       offset, (unsigned long) sp->rxd[i].next_desc);
                offset = (u32)((uintptr_t) &sp->rxd[i].rfs -
                               (uintptr_t) sp->rxd);
                printk(KERN_INFO "%2.2x %4.4x RFS        = %16.16lx\n", i,
                       offset, (unsigned long) sp->rxd[i].rfs);
                offset = (u32)((uintptr_t) &sp->rxd[i].frag_info -
                               (uintptr_t) sp->rxd);
                printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16lx\n", i,
                       offset, (unsigned long) sp->rxd[i].frag_info);
        }
}

static void ipg_dump_tfdlist(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;
        u32 offset;

        IPG_DEBUG_MSG("_dump_tfdlist\n");

        printk(KERN_INFO "tx_current            = %2.2x\n", sp->tx_current);
        printk(KERN_INFO "tx_dirty              = %2.2x\n", sp->tx_dirty);
        printk(KERN_INFO "TFDList start address = %16.16lx\n",
               (unsigned long) sp->txd_map);
        printk(KERN_INFO "TFDListPtr register   = %8.8x%8.8x\n",
               ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

        for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
                offset = (u32)((uintptr_t) &sp->txd[i].next_desc -
                               (uintptr_t) sp->txd);
                printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
                       offset, (unsigned long) sp->txd[i].next_desc);
                offset = (u32)((uintptr_t) &sp->txd[i].tfc -
                               (uintptr_t) sp->txd);
                printk(KERN_INFO "%2.2x %4.4x TFC        = %16.16lx\n", i,
                       offset, (unsigned long) sp->txd[i].tfc);
                offset = (u32)((uintptr_t) &sp->txd[i].frag_info -
                               (uintptr_t) sp->txd);
                printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16lx\n", i,
                       offset, (unsigned long) sp->txd[i].frag_info);
        }
}
#endif

static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
        ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
        ndelay(IPG_PC_PHYCTRLWAIT_NS);
}

static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}

static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
        phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

        ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}

static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
        ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
                phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}

static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
        u16 bit_data;

        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

        bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

        return bit_data;
}

/*
 * Read a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
        void __iomem *ioaddr = ipg_ioaddr(dev);
        /*
         * The GMII management frame structure for a read is as follows:
         *
         * |Preamble|st|op|phyad|regad|ta|      data      |idle|
         * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
         *
         * <32 1s> = 32 consecutive logic 1 values
         * A = bit of Physical Layer device address (MSB first)
         * R = bit of register address (MSB first)
         * z = High impedance state
         * D = bit of read data (MSB first)
         *
         * Transmission order is 'Preamble' field first, bits transmitted
         * left to right (first to last).
         */
        struct {
                u32 field;
                unsigned int len;
        } p[] = {
                { GMII_PREAMBLE,        32 },   /* Preamble */
                { GMII_ST,              2  },   /* ST */
                { GMII_READ,            2  },   /* OP */
                { phy_id,               5  },   /* PHYAD */
                { phy_reg,              5  },   /* REGAD */
                { 0x0000,               2  },   /* TA */
                { 0x0000,               16 },   /* DATA */
                { 0x0000,               1  }    /* IDLE */
        };
        unsigned int i, j;
        u8 polarity, data;

        polarity  = ipg_r8(PHY_CTRL);
        polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

        /* Create the Preamble, ST, OP, PHYAD, and REGAD fields. */
        for (j = 0; j < 5; j++) {
                for (i = 0; i < p[j].len; i++) {
                        /* For each variable length field, the MSB must be
                         * transmitted first. Rotate through the field bits,
                         * starting with the MSB, and move each bit into the
                         * 1st (2^1) bit position (this is the bit position
                         * corresponding to the MgmtData bit of the PhyCtrl
                         * register for the IPG).
                         *
                         * Example: ST = 01;
                         *
                         *          First write a '0' to bit 1 of the PhyCtrl
                         *          register, then write a '1' to bit 1 of the
                         *          PhyCtrl register.
                         *
                         * To do this, right shift the MSB of ST by the value:
                         * [field length - 1 - #ST bits already written]
                         * then left shift this result by 1.
                         */
                        data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
                        data &= IPG_PC_MGMTDATA;
                        data |= polarity | IPG_PC_MGMTDIR;

                        ipg_drive_phy_ctl_low_high(ioaddr, data);
                }
        }

        send_three_state(ioaddr, polarity);

        read_phy_bit(ioaddr, polarity);

        /*
         * For a read cycle, the bits for the next two fields (TA and
         * DATA) are driven by the PHY (the IPG reads these bits).
         */
        for (i = 0; i < p[6].len; i++) {
                p[6].field |=
                    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
        }

        send_three_state(ioaddr, polarity);
        send_three_state(ioaddr, polarity);
        send_three_state(ioaddr, polarity);
        send_end(ioaddr, polarity);

        /* Return the value of the DATA field. */
        return p[6].field;
}
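
/*
 * Illustrative use of mdio_read() (sketch only, guarded out of the
 * build): MII_PHYSID1/MII_PHYSID2 from <linux/mii.h> hold the PHY's
 * 32 bit OUI/model identifier split across two 16 bit registers. The
 * helper name is hypothetical.
 */
#if 0
static u32 example_read_phy_id(struct net_device *dev, int phy_id)
{
        u32 id;

        id  = mdio_read(dev, phy_id, MII_PHYSID1) << 16;
        id |= mdio_read(dev, phy_id, MII_PHYSID2);

        return id;
}
#endif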

/*
 * Write to a register on the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
        void __iomem *ioaddr = ipg_ioaddr(dev);
        /*
         * The GMII management frame structure for a write is as follows:
         *
         * |Preamble|st|op|phyad|regad|ta|      data      |idle|
         * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z   |
         *
         * <32 1s> = 32 consecutive logic 1 values
         * A = bit of Physical Layer device address (MSB first)
         * R = bit of register address (MSB first)
         * z = High impedance state
         * D = bit of write data (MSB first)
         *
         * Transmission order is 'Preamble' field first, bits transmitted
         * left to right (first to last).
         */
        struct {
                u32 field;
                unsigned int len;
        } p[] = {
                { GMII_PREAMBLE,        32 },   /* Preamble */
                { GMII_ST,              2  },   /* ST */
                { GMII_WRITE,           2  },   /* OP */
                { phy_id,               5  },   /* PHYAD */
                { phy_reg,              5  },   /* REGAD */
                { 0x0002,               2  },   /* TA */
                { val & 0xffff,         16 },   /* DATA */
                { 0x0000,               1  }    /* IDLE */
        };
        unsigned int i, j;
        u8 polarity, data;

        polarity  = ipg_r8(PHY_CTRL);
        polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

        /* Create the Preamble, ST, OP, PHYAD, REGAD, TA, and DATA fields. */
        for (j = 0; j < 7; j++) {
                for (i = 0; i < p[j].len; i++) {
                        /* For each variable length field, the MSB must be
                         * transmitted first. Rotate through the field bits,
                         * starting with the MSB, and move each bit into the
                         * 1st (2^1) bit position (this is the bit position
                         * corresponding to the MgmtData bit of the PhyCtrl
                         * register for the IPG).
                         *
                         * Example: ST = 01;
                         *
                         *          First write a '0' to bit 1 of the PhyCtrl
                         *          register, then write a '1' to bit 1 of the
                         *          PhyCtrl register.
                         *
                         * To do this, right shift the MSB of ST by the value:
                         * [field length - 1 - #ST bits already written]
                         * then left shift this result by 1.
                         */
                        data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
                        data &= IPG_PC_MGMTDATA;
                        data |= polarity | IPG_PC_MGMTDIR;

                        ipg_drive_phy_ctl_low_high(ioaddr, data);
                }
        }

        /* The last cycle is a tri-state, so read from the PHY. */
        for (j = 7; j < 8; j++) {
                for (i = 0; i < p[j].len; i++) {
                        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

                        p[j].field |= ((ipg_r8(PHY_CTRL) &
                                IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

                        ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
                }
        }
}

static void ipg_set_led_mode(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        u32 mode;

        mode = ipg_r32(ASIC_CTRL);
        mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

        if ((sp->led_mode & 0x03) > 1)
                mode |= IPG_AC_LED_MODE_BIT_1;  /* Write Asic Control Bit 29 */

        if ((sp->led_mode & 0x01) == 1)
                mode |= IPG_AC_LED_MODE;        /* Write Asic Control Bit 14 */

        if ((sp->led_mode & 0x08) == 8)
                mode |= IPG_AC_LED_SPEED;       /* Write Asic Control Bit 27 */

        ipg_w32(mode, ASIC_CTRL);
}

static void ipg_set_phy_set(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        int physet;

        physet = ipg_r8(PHY_SET);
        physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
        physet |= ((sp->led_mode & 0x70) >> 4);
        ipg_w8(physet, PHY_SET);
}

static int ipg_reset(struct net_device *dev, u32 resetflags)
{
        /* Assert functional resets via the IPG AsicCtrl
         * register as specified by the 'resetflags' input
         * parameter.
         */
        void __iomem *ioaddr = ipg_ioaddr(dev);
        unsigned int timeout_count = 0;

        IPG_DEBUG_MSG("_reset\n");

        ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

        /* Delay added to account for problem with 10Mbps reset. */
        mdelay(IPG_AC_RESETWAIT);

        while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
                mdelay(IPG_AC_RESETWAIT);
                if (++timeout_count > IPG_AC_RESET_TIMEOUT)
                        return -ETIME;
        }

        /* Set LED Mode in Asic Control */
        ipg_set_led_mode(dev);

        /* Set PHYSet Register Value */
        ipg_set_phy_set(dev);

        return 0;
}
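
/*
 * Typical use of ipg_reset() (illustrative): a full chip reset asserts
 * all of the functional resets at once via the IPG_RESET_MASK defined
 * near the top of this file, and the call fails with -ETIME if the
 * reset-busy bit never clears:
 *
 *      if (ipg_reset(dev, IPG_RESET_MASK) < 0)
 *              return -ETIME;
 */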

/* Find the GMII PHY address. */
static int ipg_find_phyaddr(struct net_device *dev)
{
        unsigned int phyaddr, i;

        for (i = 0; i < 32; i++) {
                u32 status;

                /* Search for the correct PHY address among 32 possible. */
                phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

                /* 10/22/03 Grace change: verify via GMII_PHY_ID1
                 * instead of GMII_PHY_STATUS.
                 */
                status = mdio_read(dev, phyaddr, MII_BMSR);

                if ((status != 0xFFFF) && (status != 0))
                        return phyaddr;
        }

        return 0x1f;
}

/*
 * Configure IPG based on result of IEEE 802.3 PHY
 * auto-negotiation.
 */
static int ipg_config_autoneg(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int txflowcontrol;
        unsigned int rxflowcontrol;
        unsigned int fullduplex;
        u32 mac_ctrl_val;
        u32 asicctrl;
        u8 phyctrl;

        IPG_DEBUG_MSG("_config_autoneg\n");

        asicctrl = ipg_r32(ASIC_CTRL);
        phyctrl = ipg_r8(PHY_CTRL);
        mac_ctrl_val = ipg_r32(MAC_CTRL);

        /* Set flags for use in resolving auto-negotiation, assuming
         * non-1000Mbps, half duplex, no flow control.
         */
        fullduplex = 0;
        txflowcontrol = 0;
        rxflowcontrol = 0;

        /* To accommodate a problem in 10Mbps operation,
         * set a global flag if the PHY is running in 10Mbps mode.
         */
        sp->tenmbpsmode = 0;

        printk(KERN_INFO "%s: Link speed = ", dev->name);

        /* Determine actual speed of operation. */
        switch (phyctrl & IPG_PC_LINK_SPEED) {
        case IPG_PC_LINK_SPEED_10MBPS:
                printk("10Mbps.\n");
                printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
                       dev->name);
                sp->tenmbpsmode = 1;
                break;
        case IPG_PC_LINK_SPEED_100MBPS:
                printk("100Mbps.\n");
                break;
        case IPG_PC_LINK_SPEED_1000MBPS:
                printk("1000Mbps.\n");
                break;
        default:
                printk("undefined!\n");
                return 0;
        }

        if (phyctrl & IPG_PC_DUPLEX_STATUS) {
                fullduplex = 1;
                txflowcontrol = 1;
                rxflowcontrol = 1;
        }

        /* Configure full duplex, and flow control. */
        if (fullduplex == 1) {
                /* Configure IPG for full duplex operation. */
                printk(KERN_INFO "%s: setting full duplex, ", dev->name);

                mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

                if (txflowcontrol == 1) {
                        printk("TX flow control");
                        mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
                } else {
                        printk("no TX flow control");
                        mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
                }

                if (rxflowcontrol == 1) {
                        printk(", RX flow control.");
                        mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
                } else {
                        printk(", no RX flow control.");
                        mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
                }

                printk("\n");
        } else {
                /* Configure IPG for half duplex operation. */
                printk(KERN_INFO "%s: setting half duplex, "
                       "no TX flow control, no RX flow control.\n", dev->name);

                mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
                        ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
                        ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
        }
        ipg_w32(mac_ctrl_val, MAC_CTRL);
        return 0;
}

/* Determine and configure multicast operation and set
 * receive mode for IPG.
 */
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
        void __iomem *ioaddr = ipg_ioaddr(dev);
        struct netdev_hw_addr *ha;
        unsigned int hashindex;
        u32 hashtable[2];
        u8 receivemode;

        IPG_DEBUG_MSG("_nic_set_multicast_list\n");

        receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

        if (dev->flags & IFF_PROMISC) {
                /* NIC to be configured in promiscuous mode. */
                receivemode = IPG_RM_RECEIVEALLFRAMES;
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   ((dev->flags & IFF_MULTICAST) &&
                    (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
                /* NIC to be configured to receive all multicast
                 * frames. */
                receivemode |= IPG_RM_RECEIVEMULTICAST;
        } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
                /* NIC to be configured to receive selected
                 * multicast addresses. */
                receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
        }

        /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
         * The IPG applies a cyclic-redundancy-check (the same CRC
         * used to calculate the frame data FCS) to the destination
         * address of all incoming multicast frames whose destination
         * address has the multicast bit set. The least significant
         * 6 bits of the CRC result are used as an addressing index
         * into the hash table. If the value of the bit addressed by
         * this index is a 1, the frame is passed to the host system.
         */

        /* Clear hashtable. */
        hashtable[0] = 0x00000000;
        hashtable[1] = 0x00000000;

        /* Cycle through all multicast addresses to filter. */
        netdev_for_each_mc_addr(ha, dev) {
                /* Calculate CRC result for each multicast address. */
                hashindex = crc32_le(0xffffffff, ha->addr,
                                     ETH_ALEN);

                /* Use only the least significant 6 bits. */
                hashindex = hashindex & 0x3F;

                /* Within "hashtable", set bit number "hashindex"
                 * to a logic 1.
                 */
                set_bit(hashindex, (void *)hashtable);
        }

        /* Write the value of the hashtable to the four 16 bit
         * HASHTABLE IPG registers, as two 32 bit writes.
         */
        ipg_w32(hashtable[0], HASHTABLE_0);
        ipg_w32(hashtable[1], HASHTABLE_1);

        ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

        IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}
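
/*
 * Worked example of the hash computation above (sketch only, guarded out
 * of the build): for a multicast address such as 01:00:5e:00:00:01,
 * crc32_le() over the six address bytes yields a 32 bit CRC whose six
 * least significant bits select one of the 64 hash-table bits. The
 * helper and address below are illustrative.
 */
#if 0
static unsigned int example_multicast_hash_index(void)
{
        static const u8 addr[ETH_ALEN] = {
                0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
        };

        /* Same CRC seed and 6 bit truncation as the filter setup above. */
        return crc32_le(0xffffffff, addr, ETH_ALEN) & 0x3f;
}
#endif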

static int ipg_io_config(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = ipg_ioaddr(dev);
        u32 origmacctrl;
        u32 restoremacctrl;

        IPG_DEBUG_MSG("_io_config\n");

        origmacctrl = ipg_r32(MAC_CTRL);

        restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

        /* Based on compilation option, determine if FCS is to be
         * stripped on receive frames by the IPG.
         */
        if (!IPG_STRIP_FCS_ON_RX)
                restoremacctrl |= IPG_MC_RCV_FCS;

        /* Determine if transmitter and/or receiver are
         * enabled so we may restore MACCTRL correctly.
         */
        if (origmacctrl & IPG_MC_TX_ENABLED)
                restoremacctrl |= IPG_MC_TX_ENABLE;

        if (origmacctrl & IPG_MC_RX_ENABLED)
                restoremacctrl |= IPG_MC_RX_ENABLE;

        /* Transmitter and receiver must be disabled before setting
         * IFSSelect.
         */
        ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
                IPG_MC_RSVD_MASK, MAC_CTRL);

        /* Now that transmitter and receiver are disabled, write
         * to IFSSelect.
         */
        ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

        /* Set RECEIVEMODE register. */
        ipg_nic_set_multicast_list(dev);

        ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);

        ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE,   RX_DMA_POLL_PERIOD);
        ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
        ipg_w8(IPG_RXDMABURSTTHRESH_VALUE,  RX_DMA_BURST_THRESH);
        ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE,   TX_DMA_POLL_PERIOD);
        ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
        ipg_w8(IPG_TXDMABURSTTHRESH_VALUE,  TX_DMA_BURST_THRESH);
        ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
                 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
                 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
                 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
        ipg_w16(IPG_FLOWONTHRESH_VALUE,  FLOW_ON_THRESH);
        ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

        /* IPG multi-frag frame bug workaround.
         * Per silicon revision B3 errata.
         */
        ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

        /* IPG TX poll now bug workaround.
         * Per silicon revision B3 errata.
         */
        ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

        /* IPG RX poll now bug workaround.
         * Per silicon revision B3 errata.
         */
        ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

        /* Now restore MACCTRL to original setting. */
        ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

        /* Disable unused RMON statistics. */
        ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

        /* Disable unused MIB statistics. */
        ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
                IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
                IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
                IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
                IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
                IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

        return 0;
}

/*
 * Create a receive buffer within system memory and update
 * NIC private structure appropriately.
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        struct ipg_rx *rxfd = sp->rxd + entry;
        struct sk_buff *skb;
        u64 rxfragsize;

        IPG_DEBUG_MSG("_get_rxbuff\n");

        skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
        if (!skb) {
                sp->rx_buff[entry] = NULL;
                return -ENOMEM;
        }

        /* Associate the receive buffer with the IPG NIC. */
        skb->dev = dev;

        /* Save the address of the sk_buff structure. */
        sp->rx_buff[entry] = skb;

        rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
                sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

        /* Set the RFD fragment length. */
        rxfragsize = sp->rxfrag_size;
        rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

        return 0;
}
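
/*
 * Layout note on frag_info (sketch): the 64 bit descriptor field packs
 * the buffer's DMA address into the bits covered by ~IPG_RFI_FRAGLEN and
 * the fragment length into the bits selected by IPG_RFI_FRAGLEN (hence
 * the shift by 48 above). The unpacking helper below is illustrative
 * only and guarded out of the build.
 */
#if 0
static void example_unpack_frag_info(const struct ipg_rx *rxfd,
                                     dma_addr_t *addr, u16 *len)
{
        u64 info = le64_to_cpu(rxfd->frag_info);

        *addr = info & ~IPG_RFI_FRAGLEN;        /* DMA address bits */
        *len = (info & IPG_RFI_FRAGLEN) >> 48;  /* fragment length bits */
}
#endif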

static int init_rfdlist(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;

        IPG_DEBUG_MSG("_init_rfdlist\n");

        for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
                struct ipg_rx *rxfd = sp->rxd + i;

                if (sp->rx_buff[i]) {
                        pci_unmap_single(sp->pdev,
                                le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
                                sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_irq(sp->rx_buff[i]);
                        sp->rx_buff[i] = NULL;
                }

                /* Clear out the RFS field. */
                rxfd->rfs = 0x0000000000000000;

                if (ipg_get_rxbuff(dev, i) < 0) {
                        /*
                         * A receive buffer was not ready, break the
                         * RFD list here.
                         */
                        IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

                        /* Just in case we cannot allocate a single RFD.
                         * Should not occur.
                         */
                        if (i == 0) {
                                printk(KERN_ERR "%s: No memory available"
                                        " for RFD list.\n", dev->name);
                                return -ENOMEM;
                        }
                }

                rxfd->next_desc = cpu_to_le64(sp->rxd_map +
                        sizeof(struct ipg_rx)*(i + 1));
        }
        sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

        sp->rx_current = 0;
        sp->rx_dirty = 0;

        /* Write the location of the RFDList to the IPG. */
        ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
        ipg_w32(0x00000000, RFD_LIST_PTR_1);

        return 0;
}

static void init_tfdlist(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;

        IPG_DEBUG_MSG("_init_tfdlist\n");

        for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
                struct ipg_tx *txfd = sp->txd + i;

                txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

                if (sp->tx_buff[i]) {
                        dev_kfree_skb_irq(sp->tx_buff[i]);
                        sp->tx_buff[i] = NULL;
                }

                txfd->next_desc = cpu_to_le64(sp->txd_map +
                        sizeof(struct ipg_tx)*(i + 1));
        }
        sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

        sp->tx_current = 0;
        sp->tx_dirty = 0;

        /* Write the location of the TFDList to the IPG. */
        IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
                       (u32) sp->txd_map);
        ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
        ipg_w32(0x00000000, TFD_LIST_PTR_1);

        sp->reset_current_tfd = 1;
}

/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        unsigned int released, pending, dirty;

        IPG_DEBUG_MSG("_nic_txfree\n");

        pending = sp->tx_current - sp->tx_dirty;
        dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

        for (released = 0; released < pending; released++) {
                struct sk_buff *skb = sp->tx_buff[dirty];
                struct ipg_tx *txfd = sp->txd + dirty;

                IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

                /* Look at each TFD's TFC field beginning
                 * at the last freed TFD up to the current TFD.
                 * If the TFDDone bit is set, free the associated
                 * buffer.
                 */
                if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
                        break;

                /* Free the transmit buffer. */
                if (skb) {
                        pci_unmap_single(sp->pdev,
                                le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
                                skb->len, PCI_DMA_TODEVICE);

                        dev_kfree_skb_irq(skb);

                        sp->tx_buff[dirty] = NULL;
                }
                dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
        }

        sp->tx_dirty += released;

        if (netif_queue_stopped(dev) &&
            (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
                netif_wake_queue(dev);
        }
}
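
/*
 * Ring accounting note: tx_current and tx_dirty above are free-running
 * counters; their difference is the number of TFDs still owned by the
 * hardware, and each counter is mapped to a descriptor index modulo
 * IPG_TFDLIST_LENGTH. For example, with tx_current == 260 and
 * tx_dirty == 257, three TFDs are pending and the first one eligible
 * for reaping sits at index 257 % IPG_TFDLIST_LENGTH.
 */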

static void ipg_tx_timeout(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;

        ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
                  IPG_AC_FIFO);

        spin_lock_irq(&sp->lock);

        /* Re-configure after DMA reset. */
        if (ipg_io_config(dev) < 0) {
                printk(KERN_INFO "%s: Error during re-configuration.\n",
                       dev->name);
        }

        init_tfdlist(dev);

        spin_unlock_irq(&sp->lock);

        ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
                MAC_CTRL);
}

/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;

        IPG_DEBUG_MSG("_nic_txcleanup\n");

        for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
                /* Reading the TXSTATUS register clears the
                 * TX_COMPLETE interrupt.
                 */
                u32 txstatusdword = ipg_r32(TX_STATUS);

                IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

                /* Check for transmit errors. Error bits are only valid
                 * if the TX_COMPLETE bit in the TXSTATUS register is a 1.
                 */
                if (!(txstatusdword & IPG_TS_TX_COMPLETE))
                        break;

                /* If in 10Mbps mode, indicate transmit is ready. */
                if (sp->tenmbpsmode)
                        netif_wake_queue(dev);

                /* Transmit error, increment stat counters. */
                if (txstatusdword & IPG_TS_TX_ERROR) {
                        IPG_DEBUG_MSG("Transmit error.\n");
                        sp->stats.tx_errors++;
                }

                /* Late collision, re-enable transmitter. */
                if (txstatusdword & IPG_TS_LATE_COLLISION) {
                        IPG_DEBUG_MSG("Late collision on transmit.\n");
                        ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
                                IPG_MC_RSVD_MASK, MAC_CTRL);
                }

                /* Maximum collisions, re-enable transmitter. */
                if (txstatusdword & IPG_TS_TX_MAX_COLL) {
                        IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
                        ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
                                IPG_MC_RSVD_MASK, MAC_CTRL);
                }

                /* Transmit underrun, reset and re-enable
                 * transmitter.
                 */
                if (txstatusdword & IPG_TS_TX_UNDERRUN) {
                        IPG_DEBUG_MSG("Transmitter underrun.\n");
                        sp->stats.tx_fifo_errors++;
                        ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
                                  IPG_AC_NETWORK | IPG_AC_FIFO);

                        /* Re-configure after DMA reset. */
                        if (ipg_io_config(dev) < 0) {
                                printk(KERN_INFO
                                       "%s: Error during re-configuration.\n",
                                       dev->name);
                        }
                        init_tfdlist(dev);

                        ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
                                IPG_MC_RSVD_MASK, MAC_CTRL);
                }
        }

        ipg_nic_txfree(dev);
}

/* Provides statistical information about the IPG NIC. */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->ioaddr;
        u16 temp1;
        u16 temp2;

        IPG_DEBUG_MSG("_nic_get_stats\n");

        /* Check to see if the NIC has been initialized via nic_open,
         * before trying to read statistic registers.
         */
        if (!test_bit(__LINK_STATE_START, &dev->state))
                return &sp->stats;

        sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
        sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
        sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
        sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
        temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
        sp->stats.rx_errors += temp1;
        sp->stats.rx_missed_errors += temp1;
        temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
                ipg_r32(IPG_LATECOLLISIONS);
        temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
        sp->stats.collisions += temp1;
        sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
        sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
                ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
        sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

        /* detailed tx_errors */
        sp->stats.tx_carrier_errors += temp2;

        /* detailed rx_errors */
        sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
                ipg_r16(IPG_FRAMETOOLONGERRRORS);
        sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

        /* Unutilized IPG statistic registers. */
        ipg_r32(IPG_MCSTFRAMESRCVDOK);

        return &sp->stats;
}

/* Restore used receive buffers. */
static int ipg_nic_rxrestore(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        const unsigned int curr = sp->rx_current;
        unsigned int dirty = sp->rx_dirty;

        IPG_DEBUG_MSG("_nic_rxrestore\n");

        for (; curr - dirty > 0; dirty++) {
                unsigned int entry = dirty % IPG_RFDLIST_LENGTH;

                /* rx_copybreak may poke hole here and there. */
                if (sp->rx_buff[entry])
                        continue;

                /* Generate a new receive buffer to replace the
                 * current buffer (which will be released by the
                 * Linux system).
                 */
                if (ipg_get_rxbuff(dev, entry) < 0) {
                        IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");

                        break;
                }

                /* Reset the RFS field. */
                sp->rxd[entry].rfs = 0x0000000000000000;
        }
        sp->rx_dirty = dirty;

        return 0;
}

/* Jumbo frame status is tracked with jumboindex and jumbosize;
 * the initial state is jumboindex = -1 and jumbosize = 0.
 * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame has
 *    been completed.
 * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is within the
 *    supported size and is still being received.
 * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is oversized;
 *    the part received so far has already been dumped, and the rest of
 *    the current frame must be dumped as well.
 */
enum {
        NORMAL_PACKET,
        ERROR_PACKET
};

enum {
        FRAME_NO_START_NO_END     = 0,
        FRAME_WITH_START          = 1,
        FRAME_WITH_END            = 10,
        FRAME_WITH_START_WITH_END = 11
};
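
/*
 * Note on the frame type encoding above: the values are chosen so that
 * plain addition in ipg_nic_rx_check_frame_type() composes the result.
 * A descriptor with both FRAMESTART and FRAMEEND set yields
 * FRAME_WITH_START + FRAME_WITH_END == 1 + 10 == 11, which is exactly
 * FRAME_WITH_START_WITH_END.
 */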

static void ipg_nic_rx_free_skb(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

        if (sp->rx_buff[entry]) {
                struct ipg_rx *rxfd = sp->rxd + entry;

                pci_unmap_single(sp->pdev,
                        le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
                        sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_irq(sp->rx_buff[entry]);
                sp->rx_buff[entry] = NULL;
        }
}

static int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
        int type = FRAME_NO_START_NO_END;

        if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
                type += FRAME_WITH_START;
        if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
                type += FRAME_WITH_END;
        return type;
}

static int ipg_nic_rx_check_error(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
        struct ipg_rx *rxfd = sp->rxd + entry;

        if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
             (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
              IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
              IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
                IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
                              (unsigned long) rxfd->rfs);

                /* Increment general receive error statistic. */
                sp->stats.rx_errors++;

                /* Increment detailed receive error statistics. */
                if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
                        IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

                        sp->stats.rx_fifo_errors++;
                }

                if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
                        IPG_DEBUG_MSG("RX runt occurred.\n");
                        sp->stats.rx_length_errors++;
                }

                /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
                 * error count handled by an IPG statistic register.
                 */

                if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
                        IPG_DEBUG_MSG("RX alignment error occurred.\n");
                        sp->stats.rx_frame_errors++;
                }

                /* Do nothing for IPG_RFS_RXFCSERROR, error count
                 * handled by an IPG statistic register.
                 */

                /* Free the memory associated with the RX
                 * buffer since it is erroneous and we will
                 * not pass it to higher layer processes.
                 */
                if (sp->rx_buff[entry]) {
                        pci_unmap_single(sp->pdev,
                                le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
                                sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

                        dev_kfree_skb_irq(sp->rx_buff[entry]);
                        sp->rx_buff[entry] = NULL;
                }
                return ERROR_PACKET;
        }
        return NORMAL_PACKET;
}

static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
                                          struct ipg_nic_private *sp,
                                          struct ipg_rx *rxfd, unsigned entry)
{
        struct ipg_jumbo *jumbo = &sp->jumbo;
        struct sk_buff *skb;
        int framelen;

        if (jumbo->found_start) {
                dev_kfree_skb_irq(jumbo->skb);
                jumbo->found_start = 0;
                jumbo->current_size = 0;
                jumbo->skb = NULL;
        }

        /* ERROR_PACKET (1): found error, NORMAL_PACKET (0): no error */
        if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
                return;

        skb = sp->rx_buff[entry];
        if (!skb)
                return;

        /* Accept this frame and send it to the upper layer. */
        framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
        if (framelen > sp->rxfrag_size)
                framelen = sp->rxfrag_size;

        skb_put(skb, framelen);
        skb->protocol = eth_type_trans(skb, dev);
        skb_checksum_none_assert(skb);
        netif_rx(skb);
        sp->rx_buff[entry] = NULL;
}

static void ipg_nic_rx_with_start(struct net_device *dev,
                                  struct ipg_nic_private *sp,
                                  struct ipg_rx *rxfd, unsigned entry)
{
        struct ipg_jumbo *jumbo = &sp->jumbo;
        struct pci_dev *pdev = sp->pdev;
        struct sk_buff *skb;

        /* ERROR_PACKET (1): found error, NORMAL_PACKET (0): no error */
        if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
                return;

        /* Accept this fragment as the start of a jumbo frame. */
        skb = sp->rx_buff[entry];
        if (!skb)
                return;

        if (jumbo->found_start)
                dev_kfree_skb_irq(jumbo->skb);

        pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
                         sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

        skb_put(skb, sp->rxfrag_size);

        jumbo->found_start = 1;
        jumbo->current_size = sp->rxfrag_size;
        jumbo->skb = skb;

        sp->rx_buff[entry] = NULL;
}

static void ipg_nic_rx_with_end(struct net_device *dev,
                                struct ipg_nic_private *sp,
                                struct ipg_rx *rxfd, unsigned entry)
{
        struct ipg_jumbo *jumbo = &sp->jumbo;

        /* ERROR_PACKET (1): found error, NORMAL_PACKET (0): no error */
        if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
                struct sk_buff *skb = sp->rx_buff[entry];

                if (!skb)
                        return;

                if (jumbo->found_start) {
                        int framelen, endframelen;

                        framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

                        endframelen = framelen - jumbo->current_size;
                        if (framelen > sp->rxsupport_size) {
                                dev_kfree_skb_irq(jumbo->skb);
                        } else {
                                memcpy(skb_put(jumbo->skb, endframelen),
                                       skb->data, endframelen);

                                jumbo->skb->protocol =
                                    eth_type_trans(jumbo->skb, dev);

                                skb_checksum_none_assert(jumbo->skb);
                                netif_rx(jumbo->skb);
                        }
                }

                jumbo->found_start = 0;
                jumbo->current_size = 0;
                jumbo->skb = NULL;

                ipg_nic_rx_free_skb(dev);
        } else {
                dev_kfree_skb_irq(jumbo->skb);
                jumbo->found_start = 0;
                jumbo->current_size = 0;
                jumbo->skb = NULL;
        }
}

static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
                                       struct ipg_nic_private *sp,
                                       struct ipg_rx *rxfd, unsigned entry)
{
        struct ipg_jumbo *jumbo = &sp->jumbo;

        /* ERROR_PACKET (1): found error, NORMAL_PACKET (0): no error */
        if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
                struct sk_buff *skb = sp->rx_buff[entry];

                if (skb) {
                        if (jumbo->found_start) {
                                jumbo->current_size += sp->rxfrag_size;
                                if (jumbo->current_size <= sp->rxsupport_size) {
                                        memcpy(skb_put(jumbo->skb,
                                                       sp->rxfrag_size),
                                               skb->data, sp->rxfrag_size);
                                }
                        }
                        ipg_nic_rx_free_skb(dev);
                }
        } else {
                dev_kfree_skb_irq(jumbo->skb);
                jumbo->found_start = 0;
                jumbo->current_size = 0;
                jumbo->skb = NULL;
        }
}

static int ipg_nic_rx_jumbo(struct net_device *dev)
{
        struct ipg_nic_private *sp = netdev_priv(dev);
        unsigned int curr = sp->rx_current;
        void __iomem *ioaddr = sp->ioaddr;
        unsigned int i;

        IPG_DEBUG_MSG("_nic_rx_jumbo\n");

        for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
                unsigned int entry = curr % IPG_RFDLIST_LENGTH;
                struct ipg_rx *rxfd = sp->rxd + entry;

                if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
                        break;

                switch (ipg_nic_rx_check_frame_type(dev)) {
                case FRAME_WITH_START_WITH_END:
                        ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
                        break;
                case FRAME_WITH_START:
                        ipg_nic_rx_with_start(dev, sp, rxfd, entry);
                        break;
                case FRAME_WITH_END:
                        ipg_nic_rx_with_end(dev, sp, rxfd, entry);
                        break;
                case FRAME_NO_START_NO_END:
                        ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
                        break;
                }
        }

        sp->rx_current = curr;

        if (i == IPG_MAXRFDPROCESS_COUNT) {
                /* There are more RFDs to process, however the
                 * allocated amount of RFD processing time has
                 * expired. Assert Interrupt Requested to make
                 * sure we come back to process the remaining RFDs.
                 */
                ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
        }

        ipg_nic_rxrestore(dev);

        return 0;
}
1373
1374static int ipg_nic_rx(struct net_device *dev)
1375{
1376        /* Transfer received Ethernet frames to higher network layers. */
1377        struct ipg_nic_private *sp = netdev_priv(dev);
1378        unsigned int curr = sp->rx_current;
1379        void __iomem *ioaddr = sp->ioaddr;
1380        struct ipg_rx *rxfd;
1381        unsigned int i;
1382
1383        IPG_DEBUG_MSG("_nic_rx\n");
1384
1385#define __RFS_MASK \
1386        cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1387
1388        for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1389                unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1390                struct sk_buff *skb = sp->rx_buff[entry];
1391                unsigned int framelen;
1392
1393                rxfd = sp->rxd + entry;
1394
1395                if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1396                        break;
1397
1398                /* Get received frame length. */
1399                framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1400
1401                /* Check for jumbo frame arrival with too small
1402                 * RXFRAG_SIZE.
1403                 */
1404                if (framelen > sp->rxfrag_size) {
1405                        IPG_DEBUG_MSG
1406                            ("RFS FrameLen > allocated fragment size.\n");
1407
1408                        framelen = sp->rxfrag_size;
1409                }
1410
1411                if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1412                       (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1413                        IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1414                        IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1415
1416                        IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
1417                                      (unsigned long int) rxfd->rfs);
1418
1419                        /* Increment general receive error statistic. */
1420                        sp->stats.rx_errors++;
1421
1422                        /* Increment detailed receive error statistics. */
1423                        if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1424                                IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
1425                                sp->stats.rx_fifo_errors++;
1426                        }
1427
1428                        if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1429                                IPG_DEBUG_MSG("RX runt occurred.\n");
1430                                sp->stats.rx_length_errors++;
1431                        }
1432
                        /* Oversized frames: do nothing, the error count
                         * is maintained by an IPG statistics register.
                         */
1437
1438                        if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1439                                IPG_DEBUG_MSG("RX alignment error occurred.\n");
1440                                sp->stats.rx_frame_errors++;
1441                        }
1442
                        /* FCS errors: do nothing, the error count is
                         * maintained by an IPG statistics register.
                         */
1447
1448                        /* Free the memory associated with the RX
1449                         * buffer since it is erroneous and we will
1450                         * not pass it to higher layer processes.
1451                         */
1452                        if (skb) {
1453                                __le64 info = rxfd->frag_info;
1454
1455                                pci_unmap_single(sp->pdev,
1456                                        le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1457                                        sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1458
1459                                dev_kfree_skb_irq(skb);
1460                        }
1461                } else {
1462
1463                        /* Adjust the new buffer length to accommodate the size
1464                         * of the received frame.
1465                         */
1466                        skb_put(skb, framelen);
1467
1468                        /* Set the buffer's protocol field to Ethernet. */
1469                        skb->protocol = eth_type_trans(skb, dev);
1470
1471                        /* The IPG encountered an error with (or
1472                         * there were no) IP/TCP/UDP checksums.
1473                         * This may or may not indicate an invalid
1474                         * IP/TCP/UDP frame was received. Let the
1475                         * upper layer decide.
1476                         */
1477                        skb_checksum_none_assert(skb);
1478
1479                        /* Hand off frame for higher layer processing.
1480                         * The function netif_rx() releases the sk_buff
1481                         * when processing completes.
1482                         */
1483                        netif_rx(skb);
1484                }
1485
1486                /* Assure RX buffer is not reused by IPG. */
1487                sp->rx_buff[entry] = NULL;
1488        }
1489
1490        /*
1491         * If there are more RFDs to process and the allocated amount of RFD
1492         * processing time has expired, assert Interrupt Requested to make
1493         * sure we come back to process the remaining RFDs.
1494         */
1495        if (i == IPG_MAXRFDPROCESS_COUNT)
1496                ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1497
1498#ifdef IPG_DEBUG
1499        /* Check if the RFD list contained no receive frame data. */
1500        if (!i)
1501                sp->EmptyRFDListCount++;
1502#endif
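        /* rxfd still points at the descriptor that terminated the loop
         * above. If it is done but does not carry both the start and
         * end flags, the frame spans several RFDs; walk forward,
         * releasing descriptors until a frame boundary is reached.
         */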
1503        while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1504               !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1505                 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1506                unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1507
1508                rxfd = sp->rxd + entry;
1509
1510                IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");
1511
                /* This is unexpected and needs additional code to be
                 * handled properly, so for the time being simply
                 * discard the frame.
                 */
1516
1517                /* Free the memory associated with the RX
1518                 * buffer since it is erroneous and we will
1519                 * not pass it to higher layer processes.
1520                 */
1521                if (sp->rx_buff[entry]) {
1522                        pci_unmap_single(sp->pdev,
1523                                le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1524                                sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1525                        dev_kfree_skb_irq(sp->rx_buff[entry]);
1526                }
1527
1528                /* Assure RX buffer is not reused by IPG. */
1529                sp->rx_buff[entry] = NULL;
1530        }
1531
1532        sp->rx_current = curr;
1533
        /* Restore used RFDs only once a minimum number have
         * accumulated; this should improve performance.
         */
1537        if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1538                ipg_nic_rxrestore(dev);
1539
1540        return 0;
1541}
1542
1543static void ipg_reset_after_host_error(struct work_struct *work)
1544{
1545        struct ipg_nic_private *sp =
1546                container_of(work, struct ipg_nic_private, task.work);
1547        struct net_device *dev = sp->dev;
1548
1549        /*
1550         * Acknowledge HostError interrupt by resetting
1551         * IPG DMA and HOST.
1552         */
1553        ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1554
1555        init_rfdlist(dev);
1556        init_tfdlist(dev);
1557
1558        if (ipg_io_config(dev) < 0) {
1559                printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
1560                       dev->name);
1561                schedule_delayed_work(&sp->task, HZ);
1562        }
1563}
1564
1565static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1566{
1567        struct net_device *dev = dev_inst;
1568        struct ipg_nic_private *sp = netdev_priv(dev);
1569        void __iomem *ioaddr = sp->ioaddr;
1570        unsigned int handled = 0;
1571        u16 status;
1572
1573        IPG_DEBUG_MSG("_interrupt_handler\n");
1574
1575        if (sp->is_jumbo)
1576                ipg_nic_rxrestore(dev);
1577
1578        spin_lock(&sp->lock);
1579
1580        /* Get interrupt source information, and acknowledge
1581         * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1582         * IntRequested, MacControlFrame, LinkEvent) interrupts
1583         * if issued. Also, all IPG interrupts are disabled by
1584         * reading IntStatusAck.
1585         */
1586        status = ipg_r16(INT_STATUS_ACK);
1587
1588        IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);
1589
        /* Not our interrupt: a shared IRQ raised by another device,
         * or a remove event.
         */
1591        if (!(status & IPG_IS_RSVD_MASK))
1592                goto out_enable;
1593
1594        handled = 1;
1595
1596        if (unlikely(!netif_running(dev)))
1597                goto out_unlock;
1598
1599        /* If RFDListEnd interrupt, restore all used RFDs. */
1600        if (status & IPG_IS_RFD_LIST_END) {
1601                IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");
1602
1603                /* The RFD list end indicates an RFD was encountered
1604                 * with a 0 NextPtr, or with an RFDDone bit set to 1
                 * (indicating the RFD is not ready for use by the
1606                 * IPG.) Try to restore all RFDs.
1607                 */
1608                ipg_nic_rxrestore(dev);
1609
1610#ifdef IPG_DEBUG
1611                /* Increment the RFDlistendCount counter. */
1612                sp->RFDlistendCount++;
1613#endif
1614        }
1615
1616        /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1617         * IntRequested interrupt, process received frames. */
1618        if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1619            (status & IPG_IS_RFD_LIST_END) ||
1620            (status & IPG_IS_RX_DMA_COMPLETE) ||
1621            (status & IPG_IS_INT_REQUESTED)) {
1622#ifdef IPG_DEBUG
1623                /* Increment the RFD list checked counter if interrupted
1624                 * only to check the RFD list. */
1625                if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1626                                IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1627                               (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1628                                IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1629                                IPG_IS_UPDATE_STATS)))
1630                        sp->RFDListCheckedCount++;
1631#endif
1632
1633                if (sp->is_jumbo)
1634                        ipg_nic_rx_jumbo(dev);
1635                else
1636                        ipg_nic_rx(dev);
1637        }
1638
1639        /* If TxDMAComplete interrupt, free used TFDs. */
1640        if (status & IPG_IS_TX_DMA_COMPLETE)
1641                ipg_nic_txfree(dev);
1642
1643        /* TxComplete interrupts indicate one of numerous actions.
1644         * Determine what action to take based on TXSTATUS register.
1645         */
1646        if (status & IPG_IS_TX_COMPLETE)
1647                ipg_nic_txcleanup(dev);
1648
1649        /* If UpdateStats interrupt, update Linux Ethernet statistics */
1650        if (status & IPG_IS_UPDATE_STATS)
1651                ipg_nic_get_stats(dev);
1652
1653        /* If HostError interrupt, reset IPG. */
1654        if (status & IPG_IS_HOST_ERROR) {
1655                IPG_DDEBUG_MSG("HostError Interrupt\n");
1656
1657                schedule_delayed_work(&sp->task, 0);
1658        }
1659
1660        /* If LinkEvent interrupt, resolve autonegotiation. */
1661        if (status & IPG_IS_LINK_EVENT) {
1662                if (ipg_config_autoneg(dev) < 0)
1663                        printk(KERN_INFO "%s: Auto-negotiation error.\n",
1664                               dev->name);
1665        }
1666
1667        /* If MACCtrlFrame interrupt, do nothing. */
1668        if (status & IPG_IS_MAC_CTRL_FRAME)
1669                IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");
1670
1671        /* If RxComplete interrupt, do nothing. */
1672        if (status & IPG_IS_RX_COMPLETE)
1673                IPG_DEBUG_MSG("RxComplete interrupt.\n");
1674
1675        /* If RxEarly interrupt, do nothing. */
1676        if (status & IPG_IS_RX_EARLY)
1677                IPG_DEBUG_MSG("RxEarly interrupt.\n");
1678
1679out_enable:
1680        /* Re-enable IPG interrupts. */
1681        ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1682                IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1683                IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1684out_unlock:
1685        spin_unlock(&sp->lock);
1686
1687        return IRQ_RETVAL(handled);
1688}
1689
1690static void ipg_rx_clear(struct ipg_nic_private *sp)
1691{
1692        unsigned int i;
1693
1694        for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
                if (sp->rx_buff[i]) {
                        struct ipg_rx *rxfd = sp->rxd + i;

                        /* Unmap the buffer before freeing the skb. */
                        pci_unmap_single(sp->pdev,
                                le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
                                sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

                        dev_kfree_skb_irq(sp->rx_buff[i]);
                        sp->rx_buff[i] = NULL;
                }
1704        }
1705}
1706
1707static void ipg_tx_clear(struct ipg_nic_private *sp)
1708{
1709        unsigned int i;
1710
1711        for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1712                if (sp->tx_buff[i]) {
1713                        struct ipg_tx *txfd = sp->txd + i;
1714
1715                        pci_unmap_single(sp->pdev,
1716                                le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1717                                sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1718
1719                        dev_kfree_skb_irq(sp->tx_buff[i]);
1720
1721                        sp->tx_buff[i] = NULL;
1722                }
1723        }
1724}
1725
1726static int ipg_nic_open(struct net_device *dev)
1727{
1728        struct ipg_nic_private *sp = netdev_priv(dev);
1729        void __iomem *ioaddr = sp->ioaddr;
1730        struct pci_dev *pdev = sp->pdev;
1731        int rc;
1732
1733        IPG_DEBUG_MSG("_nic_open\n");
1734
1735        sp->rx_buf_sz = sp->rxsupport_size;
1736
1737        /* Check for interrupt line conflicts, and request interrupt
1738         * line for IPG.
1739         *
1740         * IMPORTANT: Disable IPG interrupts prior to registering
1741         *            IRQ.
1742         */
1743        ipg_w16(0x0000, INT_ENABLE);
1744
1745        /* Register the interrupt line to be used by the IPG within
1746         * the Linux system.
1747         */
1748        rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1749                         dev->name, dev);
1750        if (rc < 0) {
1751                printk(KERN_INFO "%s: Error when requesting interrupt.\n",
1752                       dev->name);
1753                goto out;
1754        }
1755
1756        dev->irq = pdev->irq;
1757
1758        rc = -ENOMEM;
1759
1760        sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1761                                     &sp->rxd_map, GFP_KERNEL);
1762        if (!sp->rxd)
1763                goto err_free_irq_0;
1764
1765        sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1766                                     &sp->txd_map, GFP_KERNEL);
1767        if (!sp->txd)
1768                goto err_free_rx_1;
1769
1770        rc = init_rfdlist(dev);
1771        if (rc < 0) {
1772                printk(KERN_INFO "%s: Error during configuration.\n",
1773                       dev->name);
1774                goto err_free_tx_2;
1775        }
1776
1777        init_tfdlist(dev);
1778
1779        rc = ipg_io_config(dev);
1780        if (rc < 0) {
1781                printk(KERN_INFO "%s: Error during configuration.\n",
1782                       dev->name);
1783                goto err_release_tfdlist_3;
1784        }
1785
1786        /* Resolve autonegotiation. */
1787        if (ipg_config_autoneg(dev) < 0)
1788                printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);
1789
        /* Initialize the jumbo frame reassembly state. */
1791        sp->jumbo.found_start = 0;
1792        sp->jumbo.current_size = 0;
1793        sp->jumbo.skb = NULL;
1794
1795        /* Enable transmit and receive operation of the IPG. */
1796        ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1797                 IPG_MC_RSVD_MASK, MAC_CTRL);
1798
1799        netif_start_queue(dev);
1800out:
1801        return rc;
1802
1803err_release_tfdlist_3:
1804        ipg_tx_clear(sp);
1805        ipg_rx_clear(sp);
1806err_free_tx_2:
1807        dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1808err_free_rx_1:
1809        dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1810err_free_irq_0:
1811        free_irq(pdev->irq, dev);
1812        goto out;
1813}
1814
1815static int ipg_nic_stop(struct net_device *dev)
1816{
1817        struct ipg_nic_private *sp = netdev_priv(dev);
1818        void __iomem *ioaddr = sp->ioaddr;
1819        struct pci_dev *pdev = sp->pdev;
1820
1821        IPG_DEBUG_MSG("_nic_stop\n");
1822
1823        netif_stop_queue(dev);
1824
1825        IPG_DUMPTFDLIST(dev);
1826
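        /* A concurrently running interrupt handler re-enables
         * interrupts on its way out, so the acknowledge/reset sequence
         * is repeated until the enable register reads back clear;
         * synchronize_irq() waits for any handler still in flight.
         */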
1827        do {
1828                (void) ipg_r16(INT_STATUS_ACK);
1829
1830                ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1831
1832                synchronize_irq(pdev->irq);
1833        } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1834
1835        ipg_rx_clear(sp);
1836
1837        ipg_tx_clear(sp);
1838
        dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
        dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1841
1842        free_irq(pdev->irq, dev);
1843
1844        return 0;
1845}
1846
1847static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1848                                           struct net_device *dev)
1849{
1850        struct ipg_nic_private *sp = netdev_priv(dev);
1851        void __iomem *ioaddr = sp->ioaddr;
1852        unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1853        unsigned long flags;
1854        struct ipg_tx *txfd;
1855
1856        IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1857
1858        /* If in 10Mbps mode, stop the transmit queue so
1859         * no more transmit frames are accepted.
1860         */
1861        if (sp->tenmbpsmode)
1862                netif_stop_queue(dev);
1863
1864        if (sp->reset_current_tfd) {
1865                sp->reset_current_tfd = 0;
1866                entry = 0;
1867        }
1868
1869        txfd = sp->txd + entry;
1870
1871        sp->tx_buff[entry] = skb;
1872
1873        /* Clear all TFC fields, except TFDDONE. */
1874        txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1875
1876        /* Specify the TFC field within the TFD. */
1877        txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1878                (IPG_TFC_FRAMEID & sp->tx_current) |
1879                (IPG_TFC_FRAGCOUNT & (1 << 24)));
1880        /*
1881         * 16--17 (WordAlign) <- 3 (disable),
1882         * 0--15 (FrameId) <- sp->tx_current,
1883         * 24--27 (FragCount) <- 1
1884         */
1885
1886        /* Request TxComplete interrupts at an interval defined
1887         * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1888         * Request TxComplete interrupt for every frame
1889         * if in 10Mbps mode to accommodate problem with 10Mbps
1890         * processing.
1891         */
1892        if (sp->tenmbpsmode)
1893                txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1894        txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1895        /* Based on compilation option, determine if FCS is to be
1896         * appended to transmit frame by IPG.
1897         */
1898        if (!(IPG_APPEND_FCS_ON_TX))
1899                txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1900
1901        /* Based on compilation option, determine if IP, TCP and/or
1902         * UDP checksums are to be added to transmit frame by IPG.
1903         */
1904        if (IPG_ADD_IPCHECKSUM_ON_TX)
1905                txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1906
1907        if (IPG_ADD_TCPCHECKSUM_ON_TX)
1908                txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1909
1910        if (IPG_ADD_UDPCHECKSUM_ON_TX)
1911                txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1912
1913        /* Based on compilation option, determine if VLAN tag info is to be
1914         * inserted into transmit frame by IPG.
1915         */
1916        if (IPG_INSERT_MANUAL_VLAN_TAG) {
1917                txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1918                        ((u64) IPG_MANUAL_VLAN_VID << 32) |
1919                        ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1920                        ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1921        }
1922
        /* The fragment starts at the sk_buff's data field in system
         * memory; pci_map_single() provides the bus (DMA) address
         * through which the IPG fetches it.
         */
1928        txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1929                skb->len, PCI_DMA_TODEVICE));
1930
1931        /* The length of the fragment within system memory is defined by
1932         * the sk_buff structure's len field.
1933         */
1934        txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1935                ((u64) (skb->len & 0xffff) << 48));
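        /* frag_info now carries the DMA bus address in its low bits
         * and the fragment length in bits 48..63.
         */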
1936
1937        /* Clear the TFDDone bit last to indicate the TFD is ready
1938         * for transfer to the IPG.
1939         */
1940        txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1941
1942        spin_lock_irqsave(&sp->lock, flags);
1943
1944        sp->tx_current++;
1945
1946        mmiowb();
1947
1948        ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1949
1950        if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1951                netif_stop_queue(dev);
1952
1953        spin_unlock_irqrestore(&sp->lock, flags);
1954
1955        return NETDEV_TX_OK;
1956}
1957
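/*
 * Walk the DefaultPhyParam table. Each record starts with a word whose
 * high byte is the chip revision it applies to and whose low byte is
 * the record length in bytes (four bytes per register/value pair).
 * The pairs of the matching record are written to the PHY over MDIO;
 * non-matching records are skipped, and a zero word ends the table.
 */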
1958static void ipg_set_phy_default_param(unsigned char rev,
1959                                      struct net_device *dev, int phy_address)
1960{
1961        unsigned short length;
1962        unsigned char revision;
1963        unsigned short *phy_param;
1964        unsigned short address, value;
1965
1966        phy_param = &DefaultPhyParam[0];
1967        length = *phy_param & 0x00FF;
1968        revision = (unsigned char)((*phy_param) >> 8);
1969        phy_param++;
1970        while (length != 0) {
1971                if (rev == revision) {
1972                        while (length > 1) {
1973                                address = *phy_param;
1974                                value = *(phy_param + 1);
1975                                phy_param += 2;
1976                                mdio_write(dev, phy_address, address, value);
1977                                length -= 4;
1978                        }
1979                        break;
1980                } else {
1981                        phy_param += length / 2;
1982                        length = *phy_param & 0x00FF;
1983                        revision = (unsigned char)((*phy_param) >> 8);
1984                        phy_param++;
1985                }
1986        }
1987}
1988
1989static int read_eeprom(struct net_device *dev, int eep_addr)
1990{
1991        void __iomem *ioaddr = ipg_ioaddr(dev);
1992        unsigned int i;
1993        int ret = 0;
1994        u16 value;
1995
1996        value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1997        ipg_w16(value, EEPROM_CTRL);
1998
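        /* Busy-wait in 10 ms steps (at most 1000 polls) until the
         * controller clears its busy flag, then latch the result from
         * the data register; 0 is returned on timeout.
         */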
1999        for (i = 0; i < 1000; i++) {
2000                u16 data;
2001
2002                mdelay(10);
2003                data = ipg_r16(EEPROM_CTRL);
2004                if (!(data & IPG_EC_EEPROM_BUSY)) {
2005                        ret = ipg_r16(EEPROM_DATA);
2006                        break;
2007                }
2008        }
2009        return ret;
2010}
2011
2012static void ipg_init_mii(struct net_device *dev)
2013{
2014        struct ipg_nic_private *sp = netdev_priv(dev);
2015        struct mii_if_info *mii_if = &sp->mii_if;
2016        int phyaddr;
2017
2018        mii_if->dev          = dev;
2019        mii_if->mdio_read    = mdio_read;
2020        mii_if->mdio_write   = mdio_write;
2021        mii_if->phy_id_mask  = 0x1f;
2022        mii_if->reg_num_mask = 0x1f;
2023
2024        mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2025
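        /* 0x1f is presumably the "no PHY found" sentinel returned by
         * ipg_find_phyaddr(); skip the PHY setup entirely in that case.
         */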
2026        if (phyaddr != 0x1f) {
2027                u16 mii_phyctrl, mii_1000cr;
2028
2029                mii_1000cr  = mdio_read(dev, phyaddr, MII_CTRL1000);
2030                mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2031                        GMII_PHY_1000BASETCONTROL_PreferMaster;
2032                mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2033
2034                mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2035
2036                /* Set default phyparam */
2037                ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2038
2039                /* Reset PHY */
2040                mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
                mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
        }
2044}
2045
2046static int ipg_hw_init(struct net_device *dev)
2047{
2048        struct ipg_nic_private *sp = netdev_priv(dev);
2049        void __iomem *ioaddr = sp->ioaddr;
2050        unsigned int i;
2051        int rc;
2052
        /* Read the LED mode configuration from the EEPROM. */
2055        sp->led_mode = read_eeprom(dev, 6);
2056
2057        /* Reset all functions within the IPG. Do not assert
2058         * RST_OUT as not compatible with some PHYs.
2059         */
2060        rc = ipg_reset(dev, IPG_RESET_MASK);
2061        if (rc < 0)
2062                goto out;
2063
2064        ipg_init_mii(dev);
2065
2066        /* Read MAC Address from EEPROM */
2067        for (i = 0; i < 3; i++)
2068                sp->station_addr[i] = read_eeprom(dev, 16 + i);
2069
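        /* Write the station address into the three 16-bit
         * STATION_ADDRESS registers.
         */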
2070        for (i = 0; i < 3; i++)
2071                ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2072
2073        /* Set station address in ethernet_device structure. */
2074        dev->dev_addr[0] =  ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2075        dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2076        dev->dev_addr[2] =  ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2077        dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2078        dev->dev_addr[4] =  ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2079        dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2080out:
2081        return rc;
2082}
2083
2084static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2085{
2086        struct ipg_nic_private *sp = netdev_priv(dev);
2087        int rc;
2088
2089        mutex_lock(&sp->mii_mutex);
2090        rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2091        mutex_unlock(&sp->mii_mutex);
2092
2093        return rc;
2094}
2095
2096static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2097{
2098        struct ipg_nic_private *sp = netdev_priv(dev);
2099        int err;
2100
2101        /* Function to accommodate changes to Maximum Transfer Unit
2102         * (or MTU) of IPG NIC. Cannot use default function since
2103         * the default will not allow for MTU > 1500 bytes.
2104         */
2105
2106        IPG_DEBUG_MSG("_nic_change_mtu\n");
2107
        /*
         * Check that the new MTU value is between 68 (the minimum MTU
         * an IPv4 host must accept, per RFC 791) and 10240, the largest
         * MTU supported by this driver.
         */
2112        if (new_mtu < 68 || new_mtu > 10240)
2113                return -EINVAL;
2114
2115        err = ipg_nic_stop(dev);
2116        if (err)
2117                return err;
2118
2119        dev->mtu = new_mtu;
2120
2121        sp->max_rxframe_size = new_mtu;
2122
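        /* Cap the receive fragment size at 4088 bytes, which appears
         * to be the largest fragment a single RFD can carry; longer
         * frames span several RFDs and take the jumbo receive path.
         */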
2123        sp->rxfrag_size = new_mtu;
2124        if (sp->rxfrag_size > 4088)
2125                sp->rxfrag_size = 4088;
2126
2127        sp->rxsupport_size = sp->max_rxframe_size;
2128
        /* An MTU above 1536 (0x0600) bytes enables the jumbo path. */
        sp->is_jumbo = new_mtu > 0x0600;
2133
2134        return ipg_nic_open(dev);
2135}
2136
2137static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2138{
2139        struct ipg_nic_private *sp = netdev_priv(dev);
2140        int rc;
2141
2142        mutex_lock(&sp->mii_mutex);
2143        rc = mii_ethtool_gset(&sp->mii_if, cmd);
2144        mutex_unlock(&sp->mii_mutex);
2145
2146        return rc;
2147}
2148
2149static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2150{
2151        struct ipg_nic_private *sp = netdev_priv(dev);
2152        int rc;
2153
2154        mutex_lock(&sp->mii_mutex);
2155        rc = mii_ethtool_sset(&sp->mii_if, cmd);
2156        mutex_unlock(&sp->mii_mutex);
2157
2158        return rc;
2159}
2160
2161static int ipg_nway_reset(struct net_device *dev)
2162{
2163        struct ipg_nic_private *sp = netdev_priv(dev);
2164        int rc;
2165
2166        mutex_lock(&sp->mii_mutex);
2167        rc = mii_nway_restart(&sp->mii_if);
2168        mutex_unlock(&sp->mii_mutex);
2169
2170        return rc;
2171}
2172
2173static const struct ethtool_ops ipg_ethtool_ops = {
2174        .get_settings = ipg_get_settings,
2175        .set_settings = ipg_set_settings,
2176        .nway_reset   = ipg_nway_reset,
2177};
2178
2179static void __devexit ipg_remove(struct pci_dev *pdev)
2180{
2181        struct net_device *dev = pci_get_drvdata(pdev);
2182        struct ipg_nic_private *sp = netdev_priv(dev);
2183
2184        IPG_DEBUG_MSG("_remove\n");
2185
2186        /* Un-register Ethernet device. */
2187        unregister_netdev(dev);
2188
2189        pci_iounmap(pdev, sp->ioaddr);
2190
2191        pci_release_regions(pdev);
2192
2193        free_netdev(dev);
2194        pci_disable_device(pdev);
2195        pci_set_drvdata(pdev, NULL);
2196}
2197
2198static const struct net_device_ops ipg_netdev_ops = {
2199        .ndo_open               = ipg_nic_open,
2200        .ndo_stop               = ipg_nic_stop,
2201        .ndo_start_xmit         = ipg_nic_hard_start_xmit,
2202        .ndo_get_stats          = ipg_nic_get_stats,
2203        .ndo_set_multicast_list = ipg_nic_set_multicast_list,
2204        .ndo_do_ioctl           = ipg_ioctl,
2205        .ndo_tx_timeout         = ipg_tx_timeout,
2206        .ndo_change_mtu         = ipg_nic_change_mtu,
2207        .ndo_set_mac_address    = eth_mac_addr,
2208        .ndo_validate_addr      = eth_validate_addr,
2209};
2210
2211static int __devinit ipg_probe(struct pci_dev *pdev,
2212                               const struct pci_device_id *id)
2213{
2214        unsigned int i = id->driver_data;
2215        struct ipg_nic_private *sp;
2216        struct net_device *dev;
2217        void __iomem *ioaddr;
2218        int rc;
2219
2220        rc = pci_enable_device(pdev);
2221        if (rc < 0)
2222                goto out;
2223
2224        printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2225
2226        pci_set_master(pdev);
2227
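        /* Request 40-bit DMA addressing, seemingly the widest mask the
         * chip supports, and fall back to 32-bit if the platform
         * cannot provide it.
         */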
2228        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2229        if (rc < 0) {
2230                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2231                if (rc < 0) {
2232                        printk(KERN_ERR "%s: DMA config failed.\n",
2233                               pci_name(pdev));
2234                        goto err_disable_0;
2235                }
2236        }
2237
2238        /*
2239         * Initialize net device.
2240         */
2241        dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2242        if (!dev) {
2243                printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
2244                rc = -ENOMEM;
2245                goto err_disable_0;
2246        }
2247
2248        sp = netdev_priv(dev);
2249        spin_lock_init(&sp->lock);
2250        mutex_init(&sp->mii_mutex);
2251
2252        sp->is_jumbo = IPG_IS_JUMBO;
2253        sp->rxfrag_size = IPG_RXFRAG_SIZE;
2254        sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2255        sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2256
2257        /* Declare IPG NIC functions for Ethernet device methods.
2258         */
2259        dev->netdev_ops = &ipg_netdev_ops;
2260        SET_NETDEV_DEV(dev, &pdev->dev);
2261        SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
2262
2263        rc = pci_request_regions(pdev, DRV_NAME);
2264        if (rc)
2265                goto err_free_dev_1;
2266
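        /* Map BAR 1, which appears to be the memory-mapped register
         * window on this family (BAR 0 being the I/O port window).
         */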
2267        ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2268        if (!ioaddr) {
                printk(KERN_ERR "%s: cannot map MMIO\n", pci_name(pdev));
2270                rc = -EIO;
2271                goto err_release_regions_2;
2272        }
2273
2274        /* Save the pointer to the PCI device information. */
2275        sp->ioaddr = ioaddr;
2276        sp->pdev = pdev;
2277        sp->dev = dev;
2278
2279        INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2280
2281        pci_set_drvdata(pdev, dev);
2282
2283        rc = ipg_hw_init(dev);
2284        if (rc < 0)
2285                goto err_unmap_3;
2286
2287        rc = register_netdev(dev);
2288        if (rc < 0)
2289                goto err_unmap_3;
2290
2291        printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
2292out:
2293        return rc;
2294
2295err_unmap_3:
2296        pci_iounmap(pdev, ioaddr);
2297err_release_regions_2:
2298        pci_release_regions(pdev);
2299err_free_dev_1:
2300        free_netdev(dev);
2301err_disable_0:
2302        pci_disable_device(pdev);
2303        goto out;
2304}
2305
2306static struct pci_driver ipg_pci_driver = {
2307        .name           = IPG_DRIVER_NAME,
2308        .id_table       = ipg_pci_tbl,
2309        .probe          = ipg_probe,
2310        .remove         = __devexit_p(ipg_remove),
2311};
2312
2313static int __init ipg_init_module(void)
2314{
2315        return pci_register_driver(&ipg_pci_driver);
2316}
2317
2318static void __exit ipg_exit_module(void)
2319{
2320        pci_unregister_driver(&ipg_pci_driver);
2321}
2322
2323module_init(ipg_init_module);
2324module_exit(ipg_exit_module);
2325