linux/drivers/net/jme.c
/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
        "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
        "Do not use external plug signal for pseudo hot-plug.");

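/*
 * PHY register access goes through the SMI (MDIO) interface: a request
 * is posted to JME_SMI and polled until SMI_OP_REQ clears.  MII_BMSR is
 * read twice ("again") because its link-status bit is latched per the
 * 802.3 MII definition, so only the second read reflects the current
 * link state.
 */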
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                pr_err("phy(%d) read timeout : %d\n", phy, reg);
                return 0;
        }

        if (again--)
                goto read_again;

        return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                pr_err("phy(%d) write timeout : %d\n", phy, reg);
}

static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
                jme_mdio_write(jme->dev,
                                jme->mii_if.phy_id,
                                MII_CTRL1000,
                                ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);
}

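/*
 * Wake-on-LAN patterns appear to be programmed through an index/data
 * register pair: JME_WFOI selects the frame number and which CRC/mask
 * dword is addressed, and JME_WFODP supplies the value.  The wmb()
 * between the two writes keeps the index write from being reordered
 * past the data write.
 */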
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
                       const u32 *mask, u32 crc, int fnr)
{
        int i;

        /*
         * Setup CRC pattern
         */
        jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
        wmb();
        jwrite32(jme, JME_WFODP, crc);
        wmb();

        /*
         * Setup Mask
         */
        for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
                jwrite32(jme, JME_WFOI,
                                ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                                (fnr & WFOI_FRAME_SEL));
                wmb();
                jwrite32(jme, JME_WFODP, mask[i]);
                wmb();
        }
}

static inline void
jme_mac_rxclk_off(struct jme_adapter *jme)
{
        jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_rxclk_on(struct jme_adapter *jme)
{
        jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_txclk_off(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_mac_txclk_on(struct jme_adapter *jme)
{
        u32 speed = jme->reg_ghc & GHC_SPEED;
        if (speed == GHC_SPEED_1000M)
                jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
        else
                jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_250A2_workaround(struct jme_adapter *jme)
{
        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                             GPREG1_RSSPATCH);
        jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_assert_ghc_reset(struct jme_adapter *jme)
{
        jme->reg_ghc |= GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_ghc_reset(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

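/*
 * The MAC reset below toggles the RX/TX MAC clocks around asserting
 * and clearing GHC_SWRST; the udelay(1) calls presumably give the
 * hardware time to latch each step.  Afterwards the ring base/count
 * registers, multicast hash and wakeup frames are cleared to a known
 * state.
 */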
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
        u32 crc = 0xCDCDCDCD;
        u32 gpreg0;
        int i;

        jme_reset_ghc_speed(jme);
        jme_reset_250A2_workaround(jme);

        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        udelay(1);
        jme_assert_ghc_reset(jme);
        udelay(1);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);
        udelay(1);
        jme_clear_ghc_reset(jme);
        udelay(1);
        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        udelay(1);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);

        jwrite32(jme, JME_RXDBA_LO, 0x00000000);
        jwrite32(jme, JME_RXDBA_HI, 0x00000000);
        jwrite32(jme, JME_RXQDC, 0x00000000);
        jwrite32(jme, JME_RXNDA, 0x00000000);
        jwrite32(jme, JME_TXDBA_LO, 0x00000000);
        jwrite32(jme, JME_TXDBA_HI, 0x00000000);
        jwrite32(jme, JME_TXQDC, 0x00000000);
        jwrite32(jme, JME_TXNDA, 0x00000000);

        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
                jme_setup_wakeup_frame(jme, mask, crc, i);
        if (jme->fpgaver)
                gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
        else
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
}

static inline void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if (val & SMBCSR_EEPROMD) {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if (i == 0) {
                        pr_err("eeprom reload timeout\n");
                        return -EIO;
                }
        }

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        u32 val;

        spin_lock_bh(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock_bh(&jme->macaddr_lock);
}

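/*
 * Each RX interrupt coalescing (PCC) level is a timeout/packet-count
 * pair written to JME_PCCRX0.  PCC_OFF is what jme_polling_mode()
 * selects for NAPI, while jme_dynamic_pcc() escalates from P1 towards
 * P3 as traffic grows, batching more packets per interrupt.
 */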
static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch (p) {
        case PCC_OFF:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }
        wmb();

        if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
                netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
        u32 phylink, bmsr;

        phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
        bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
        if (bmsr & BMSR_ANCOMP)
                phylink |= PHY_LINK_AUTONEG_COMPLETE;

        return phylink;
}

static inline void
jme_set_phyfifo_5level(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifo_8level(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

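/*
 * jme_check_link() resolves the current speed/duplex either from the
 * PHY (on FPGA prototypes) or from the JME_PHY_LINK register, then
 * reprograms GHC, TXMCS/TXTRHD and the JMC250 workaround bits to
 * match.  It returns nonzero when the link state is unchanged; with
 * testonly set it reports without touching the hardware.
 */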
static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        linkmsg[0] = '\0';

        if (jme->fpgaver)
                phylink = jme_linkstat_from_phy(jme);
        else
                phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If autonegotiation was not enabled,
                         * speed/duplex info must be obtained from SMI
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcat(linkmsg, "Forced: ");
                } else {
                        /*
                         * Keep polling until speed/duplex resolution
                         * completes
                         */
                        while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {

                                udelay(1);

                                if (jme->fpgaver)
                                        phylink = jme_linkstat_from_phy(jme);
                                else
                                        phylink = jread32(jme, JME_PHY_LINK);
                        }
                        if (!cnt)
                                pr_err("Timed out waiting for speed/duplex resolution\n");

                        strcat(linkmsg, "ANed: ");
                }

                if (jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if (testonly)
                        goto out;

                jme->phylink = phylink;

                /*
                 * The speed/duplex bits of jme->reg_ghc have already
                 * been cleared by jme_reset_mac_processor()
                 */
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
                        jme->reg_ghc |= GHC_SPEED_10M;
                        strcat(linkmsg, "10 Mbps, ");
                        break;
                case PHY_LINK_SPEED_100M:
                        jme->reg_ghc |= GHC_SPEED_100M;
                        strcat(linkmsg, "100 Mbps, ");
                        break;
                case PHY_LINK_SPEED_1000M:
                        jme->reg_ghc |= GHC_SPEED_1000M;
                        strcat(linkmsg, "1000 Mbps, ");
                        break;
                default:
                        break;
                }

                if (phylink & PHY_LINK_DUPLEX) {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
                        jme->reg_ghc |= GHC_DPX;
                } else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
                }

                jwrite32(jme, JME_GHC, jme->reg_ghc);

                if (is_buggy250(jme->pdev->device, jme->chiprev)) {
                        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                                             GPREG1_RSSPATCH);
                        if (!(phylink & PHY_LINK_DUPLEX))
                                jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
                        switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                jme_set_phyfifo_8level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_100M:
                                jme_set_phyfifo_5level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_1000M:
                                jme_set_phyfifo_8level(jme);
                                break;
                        default:
                                break;
                        }
                }
                jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");
                strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
                                        "MDI-X" :
                                        "MDI");
                netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
                netif_carrier_on(netdev);
        } else {
                if (testonly)
                        goto out;

                netif_info(jme, link, jme->dev, "Link is down\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                   &(txring->dmaalloc),
                                   GFP_ATOMIC);

        if (!txring->alloc)
                goto err_set_null;

        /*
         * 16-byte alignment
         */
        txring->desc            = (void *)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);

        txring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
                                        jme->tx_ring_size, GFP_ATOMIC);
        if (unlikely(!(txring->bufinf)))
                goto err_free_txring;

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->tx_ring_size);

        return 0;

err_free_txring:
        dma_free_coherent(&(jme->pdev->dev),
                          TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                          txring->alloc,
                          txring->dmaalloc);

err_set_null:
        txring->desc = NULL;
        txring->dmaalloc = 0;
        txring->dma = 0;
        txring->bufinf = NULL;

        return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi;

        if (txring->alloc) {
                if (txring->bufinf) {
                        for (i = 0 ; i < jme->tx_ring_size ; ++i) {
                                txbi = txring->bufinf + i;
                                if (txbi->skb) {
                                        dev_kfree_skb(txbi->skb);
                                        txbi->skb = NULL;
                                }
                                txbi->mapping           = 0;
                                txbi->len               = 0;
                                txbi->nr_desc           = 0;
                                txbi->start_xmit        = 0;
                        }
                        kfree(txring->bufinf);
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
                txring->bufinf          = NULL;
        }
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
        wmb();

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32f(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);

        /*
         * Start clock for TX MAC Processor
         */
        jme_mac_txclk_on(jme);
}

static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
        /*
         * Restart TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
        wmb();

        val = jread32(jme, JME_TXCS);
        for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
                rmb();
        }

        if (!i)
                pr_err("Disable TX engine timeout\n");

        /*
         * Stop clock for TX MAC Processor
         */
        jme_mac_txclk_off(jme);
}

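/*
 * Descriptor ownership: jme_set_clean_rxdesc() fills in the buffer
 * address/length first and only then, after a wmb(), sets RXFLAG_OWN
 * to hand the descriptor back to the NIC, so the hardware can never
 * observe the OWN bit before the rest of the descriptor is in memory.
 */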
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        register struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if (jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf + i;
        struct sk_buff *skb;
        dma_addr_t mapping;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if (unlikely(!skb))
                return -ENOMEM;

        mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
                               offset_in_page(skb->data), skb_tailroom(skb),
                               PCI_DMA_FROMDEVICE);
        if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        if (likely(rxbi->mapping))
                pci_unmap_page(jme->pdev, rxbi->mapping,
                               rxbi->len, PCI_DMA_FROMDEVICE);

        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = mapping;
        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if (rxbi->skb) {
                pci_unmap_page(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if (rxring->alloc) {
                if (rxring->bufinf) {
                        for (i = 0 ; i < jme->rx_ring_size ; ++i)
                                jme_free_rx_buf(jme, i);
                        kfree(rxring->bufinf);
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
                rxring->bufinf   = NULL;
        }
        rxring->next_to_use   = 0;
        atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                   &(rxring->dmaalloc),
                                   GFP_ATOMIC);
        if (!rxring->alloc)
                goto err_set_null;

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void *)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        atomic_set(&rxring->next_to_clean, 0);

        rxring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
                                        jme->rx_ring_size, GFP_ATOMIC);
        if (unlikely(!(rxring->bufinf)))
                goto err_free_rxring;

        /*
         * Initialize Receive Descriptors
         */
        memset(rxring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->rx_ring_size);
        for (i = 0 ; i < jme->rx_ring_size ; ++i) {
                if (unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;

err_free_rxring:
        dma_free_coherent(&(jme->pdev->dev),
                          RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                          rxring->alloc,
                          rxring->dmaalloc);
err_set_null:
        rxring->desc = NULL;
        rxring->dmaalloc = 0;
        rxring->dma = 0;
        rxring->bufinf = NULL;

        return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0);
        wmb();

        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

        /*
         * Setup Unicast Filter
         */
        jme_set_unicastaddr(jme->dev);
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);

        /*
         * Start clock for RX MAC Processor
         */
        jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        u32 val;

        /*
         * Disable RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);
        wmb();

        val = jread32(jme, JME_RXCS);
        for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_RXCS);
                rmb();
        }

        if (!i)
                pr_err("Disable RX engine timeout\n");

        /*
         * Stop clock for RX MAC Processor
         */
        jme_mac_rxclk_off(jme);
}

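/*
 * jme_udpsum() returns the UDP checksum field of an IPv4/UDP frame
 * (or a nonzero 0xFFFF for anything else).  jme_rxsum_ok() uses it to
 * avoid flagging a checksum error for datagrams whose sender set the
 * optional UDP checksum to zero, i.e. did not compute one.
 */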
static u16
jme_udpsum(struct sk_buff *skb)
{
        u16 csum = 0xFFFFu;

        if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
                return csum;
        if (skb->protocol != htons(ETH_P_IP))
                return csum;
        skb_set_network_header(skb, ETH_HLEN);
        if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
            (skb->len < (ETH_HLEN +
                        (ip_hdr(skb)->ihl << 2) +
                        sizeof(struct udphdr)))) {
                skb_reset_network_header(skb);
                return csum;
        }
        skb_set_transport_header(skb,
                        ETH_HLEN + (ip_hdr(skb)->ihl << 2));
        csum = udp_hdr(skb)->check;
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);

        return csum;
}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
        if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
                return false;

        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
                        == RXWBFLAG_TCPON)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
                return false;
        }

        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
                        == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
                return false;
        }

        if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
                        == RXWBFLAG_IPV4)) {
                netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
                return false;
        }

        return true;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if (unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        } else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
                        u16 vid = le16_to_cpu(rxdesc->descwb.vlan);

                        __vlan_hwaccel_put_tag(skb, vid);
                        NET_STAT(jme).rx_bytes += 4;
                }
                jme->jme_rx(skb);

                if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
                    cpu_to_le16(RXWBFLAG_DEST_MUL))
                        ++(NET_STAT(jme).multicast);

                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);
}

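/*
 * RX ring walk: a descriptor belongs to the driver once the NIC has
 * cleared RXWBFLAG_OWN and set the write-back-complete bit.  A frame
 * may span several descriptors (desccnt), so the clean index advances
 * by desccnt each iteration, masked by rx_ring_mask to wrap the ring.
 */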
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

        if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out_inc;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                goto out_inc;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                goto out_inc;

        i = atomic_read(&rxring->next_to_clean);
        while (limit > 0) {
                rxdesc = rxring->desc;
                rxdesc += i;

                if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;
                --limit;

                rmb();
                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                if (unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

                        if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if (desccnt > 1)
                                limit -= desccnt - 1;

                        for (j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);
                                j = (j + 1) & (mask);
                        }

                } else {
                        jme_alloc_and_feed_skb(jme, i);
                }

                i = (i + desccnt) & (mask);
        }

out:
        atomic_set(&rxring->next_to_clean, i);

out_inc:
        atomic_inc(&jme->rx_cleaning);

        return limit > 0 ? limit : 0;
}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if (likely(atmp == dpi->cur)) {
                dpi->cnt = 0;
                return;
        }

        if (dpi->attempt == atmp) {
                ++(dpi->cnt);
        } else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

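/*
 * Dynamic PCC picks a coalescing level from the traffic observed since
 * the last timer tick: heavy byte rates select P3, high packet or
 * interrupt rates P2, and everything else P1.  A new level must win
 * more than five consecutive ticks (dpi->cnt) before it is applied, so
 * the setting does not flap under bursty load.
 */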
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
                 dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
                        tasklet_schedule(&jme->rxclean_task);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_shutdown_nic(struct jme_adapter *jme)
{
        u32 phylink;

        phylink = jme_linkstat_from_phy(jme);

        if (!(phylink & PHY_LINK_UP)) {
                /*
                 * Disable all interrupts before issuing the timer
                 */
                jme_stop_irq(jme);
                jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
        }
}

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct net_device *netdev = jme->dev;

        if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
                jme_shutdown_nic(jme);
                return;
        }

        if (unlikely(!netif_carrier_ok(netdev) ||
                (atomic_read(&jme->link_changing) != 1)
        )) {
                jme_stop_pcc_timer(jme);
                return;
        }

        if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
                jme_dynamic_pcc(jme);

        jme_start_pcc_timer(jme);
}

static inline void
jme_polling_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
        u32 apmc;
        apmc = jread32(jme, JME_APMC);
        return apmc & JME_APMC_PSEUDO_HP_EN;
}

static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
        u32 apmc;

        apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
        apmc &= ~JME_APMC_EPIEN_CTRL;
        if (!no_extplug) {
                jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
                wmb();
        }
        jwrite32f(jme, JME_APMC, apmc);

        jwrite32f(jme, JME_TIMER2, 0);
        set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
        u32 apmc;

        jwrite32f(jme, JME_TMCSR, 0);
        jwrite32f(jme, JME_TIMER2, 0);
        clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

        apmc = jread32(jme, JME_APMC);
        apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
        jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
        wmb();
        jwrite32f(jme, JME_APMC, apmc);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct net_device *netdev = jme->dev;
        int rc;

        while (!atomic_dec_and_test(&jme->link_changing)) {
                atomic_inc(&jme->link_changing);
                netif_info(jme, intr, jme->dev, "Failed to get link change lock\n");
                while (atomic_read(&jme->link_changing) != 1)
                        netif_info(jme, intr, jme->dev, "Waiting for link change lock\n");
        }

        if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;

        jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);
        if (jme_pseudo_hotplug_enabled(jme))
                jme_stop_shutdown_timer(jme);

        jme_stop_pcc_timer(jme);
        tasklet_disable(&jme->txclean_task);
        tasklet_disable(&jme->rxclean_task);
        tasklet_disable(&jme->rxempty_task);

        if (netif_carrier_ok(netdev)) {
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);

                if (test_bit(JME_FLAG_POLL, &jme->flags))
                        jme_polling_mode(jme);

                netif_carrier_off(netdev);
        }

        jme_check_link(netdev, 0);
        if (netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        pr_err("Failed to allocate RX resources, device STOPPED!\n");
                        goto out_enable_tasklet;
                }

                rc = jme_setup_tx_resources(jme);
                if (rc) {
                        pr_err("Failed to allocate TX resources, device STOPPED!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);

                if (test_bit(JME_FLAG_POLL, &jme->flags))
                        jme_interrupt_mode(jme);

                jme_start_pcc_timer(jme);
        } else if (jme_pseudo_hotplug_enabled(jme)) {
                jme_start_shutdown_timer(jme);
        }

        goto out_enable_tasklet;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out_enable_tasklet:
        tasklet_enable(&jme->txclean_task);
        tasklet_hi_enable(&jme->rxclean_task);
        tasklet_hi_enable(&jme->rxempty_task);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
        ++(dpi->intr_cnt);
}

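/*
 * NAPI poll: process up to the given budget of RX descriptors, restart
 * the RX engine if it stalled on an empty ring (counting those events
 * as drops), and fall back to interrupt mode once a poll round leaves
 * budget unused.
 */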
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
        struct jme_adapter *jme = jme_napi_priv(holder);
        int rest;

        rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);

        if (rest) {
                JME_RX_COMPLETE(netdev, holder);
                jme_interrupt_mode(jme);
        }

        JME_NAPI_WEIGHT_SET(budget, rest);
        return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                return;

        netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

        jme_rx_clean_tasklet(arg);

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        smp_wmb();
        if (unlikely(netif_queue_stopped(jme->dev) &&
        atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
                netif_info(jme, tx_done, jme->dev, "TX Queue Woken\n");
                netif_wake_queue(jme->dev);
        }
}

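/*
 * TX completion runs from next_to_clean while the NIC has released the
 * descriptors (TXWBFLAG_OWN cleared): fragment pages are unmapped, the
 * skb freed, and the reclaimed descriptor count added back to nr_free
 * before possibly waking a stopped queue.
 */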
static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err, mask;

        tx_dbg(jme, "Into txclean\n");

        if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                goto out;

        max = jme->tx_ring_size - atomic_read(&txring->nr_free);
        mask = jme->tx_ring_mask;

        for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

                ctxbi = txbi + i;

                if (likely(ctxbi->skb &&
                !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

                        tx_dbg(jme, "txclean: %d+%d@%lu\n",
                               i, ctxbi->nr_desc, jiffies);

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (mask));
                                txdesc[(i + j) & (mask)].dw[0] = 0;

                                pci_unmap_page(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);

                        cnt += ctxbi->nr_desc;

                        if (unlikely(err)) {
                                ++(NET_STAT(jme).tx_carrier_errors);
                        } else {
                                ++(NET_STAT(jme).tx_packets);
                                NET_STAT(jme).tx_bytes += ctxbi->len;
                        }

                        ctxbi->skb = NULL;
                        ctxbi->len = 0;
                        ctxbi->start_xmit = 0;

                } else {
                        break;
                }

                i = (i + ctxbi->nr_desc) & mask;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
        atomic_set(&txring->next_to_clean, i);
        atomic_add(cnt, &txring->nr_free);

        jme_wake_queue_if_stopped(jme);

out:
        atomic_inc(&jme->tx_cleaning);
}

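/*
 * Common interrupt body for both INTx and MSI: events are acknowledged
 * through JME_IEVE and deferred to tasklets (a link change preempts
 * all other work).  In NAPI mode, RX events schedule the poll loop
 * instead of the rxclean/rxempty tasklets.
 */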
1526static void
1527jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
1528{
1529        /*
1530         * Disable interrupt
1531         */
1532        jwrite32f(jme, JME_IENC, INTR_ENABLE);
1533
1534        if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1535                /*
1536                 * Link change event is critical
1537                 * all other events are ignored
1538                 */
1539                jwrite32(jme, JME_IEVE, intrstat);
1540                tasklet_schedule(&jme->linkch_task);
1541                goto out_reenable;
1542        }
1543
1544        if (intrstat & INTR_TMINTR) {
1545                jwrite32(jme, JME_IEVE, INTR_TMINTR);
1546                tasklet_schedule(&jme->pcc_task);
1547        }
1548
1549        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1550                jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1551                tasklet_schedule(&jme->txclean_task);
1552        }
1553
1554        if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1555                jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1556                                                     INTR_PCCRX0 |
1557                                                     INTR_RX0EMP)) |
1558                                        INTR_RX0);
1559        }
1560
1561        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1562                if (intrstat & INTR_RX0EMP)
1563                        atomic_inc(&jme->rx_empty);
1564
1565                if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1566                        if (likely(JME_RX_SCHEDULE_PREP(jme))) {
1567                                jme_polling_mode(jme);
1568                                JME_RX_SCHEDULE(jme);
1569                        }
1570                }
1571        } else {
1572                if (intrstat & INTR_RX0EMP) {
1573                        atomic_inc(&jme->rx_empty);
1574                        tasklet_hi_schedule(&jme->rxempty_task);
1575                } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
1576                        tasklet_hi_schedule(&jme->rxclean_task);
1577                }
1578        }
1579
1580out_reenable:
1581        /*
1582         * Re-enable interrupt
1583         */
1584        jwrite32f(jme, JME_IENS, INTR_ENABLE);
1585}
1586
1587static irqreturn_t
1588jme_intr(int irq, void *dev_id)
1589{
1590        struct net_device *netdev = dev_id;
1591        struct jme_adapter *jme = netdev_priv(netdev);
1592        u32 intrstat;
1593
1594        intrstat = jread32(jme, JME_IEVE);
1595
1596        /*
1597         * Check if it's really an interrupt for us
1598         */
1599        if (unlikely((intrstat & INTR_ENABLE) == 0))
1600                return IRQ_NONE;
1601
1602        /*
1603         * Check if the device still exist
1604         */
1605        if (unlikely(intrstat == ~((typeof(intrstat))0)))
1606                return IRQ_NONE;
1607
1608        jme_intr_msi(jme, intrstat);
1609
1610        return IRQ_HANDLED;
1611}
1612
1613static irqreturn_t
1614jme_msi(int irq, void *dev_id)
1615{
1616        struct net_device *netdev = dev_id;
1617        struct jme_adapter *jme = netdev_priv(netdev);
1618        u32 intrstat;
1619
1620        intrstat = jread32(jme, JME_IEVE);
1621
1622        jme_intr_msi(jme, intrstat);
1623
1624        return IRQ_HANDLED;
1625}
1626
1627static void
1628jme_reset_link(struct jme_adapter *jme)
1629{
1630        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1631}
1632
1633static void
1634jme_restart_an(struct jme_adapter *jme)
1635{
1636        u32 bmcr;
1637
1638        spin_lock_bh(&jme->phy_lock);
1639        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1640        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1641        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1642        spin_unlock_bh(&jme->phy_lock);
1643}
1644
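    /*
     * Prefer MSI, falling back to a shared legacy interrupt when MSI
     * cannot be enabled.  On request_irq() failure any MSI enablement
     * is rolled back.
     */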
1645static int
1646jme_request_irq(struct jme_adapter *jme)
1647{
1648        int rc;
1649        struct net_device *netdev = jme->dev;
1650        irq_handler_t handler = jme_intr;
1651        int irq_flags = IRQF_SHARED;
1652
1653        if (!pci_enable_msi(jme->pdev)) {
1654                set_bit(JME_FLAG_MSI, &jme->flags);
1655                handler = jme_msi;
1656                irq_flags = 0;
1657        }
1658
1659        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1660                          netdev);
1661        if (rc) {
1662                netdev_err(netdev,
1663                           "Unable to request %s interrupt (return: %d)\n",
1664                           test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1665                           rc);
1666
1667                if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1668                        pci_disable_msi(jme->pdev);
1669                        clear_bit(JME_FLAG_MSI, &jme->flags);
1670                }
1671        } else {
1672                netdev->irq = jme->pdev->irq;
1673        }
1674
1675        return rc;
1676}
1677
1678static void
1679jme_free_irq(struct jme_adapter *jme)
1680{
1681        free_irq(jme->pdev->irq, jme->dev);
1682        if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1683                pci_disable_msi(jme->pdev);
1684                clear_bit(JME_FLAG_MSI, &jme->flags);
1685                jme->dev->irq = jme->pdev->irq;
1686        }
1687}
1688
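    /*
     * Chip revisions with new_phy_power_ctrl() gate PHY power through
     * the JME_PHY_PWR register and the vendor-private PCI_PRIV_PE1
     * config word, in addition to the standard BMCR power-down bit.
     */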
1689static inline void
1690jme_new_phy_on(struct jme_adapter *jme)
1691{
1692        u32 reg;
1693
1694        reg = jread32(jme, JME_PHY_PWR);
1695        reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1696                 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1697        jwrite32(jme, JME_PHY_PWR, reg);
1698
1699        pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1700        reg &= ~PE1_GPREG0_PBG;
1701        reg |= PE1_GPREG0_ENBG;
1702        pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1703}
1704
1705static inline void
1706jme_new_phy_off(struct jme_adapter *jme)
1707{
1708        u32 reg;
1709
1710        reg = jread32(jme, JME_PHY_PWR);
1711        reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1712               PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1713        jwrite32(jme, JME_PHY_PWR, reg);
1714
1715        pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1716        reg &= ~PE1_GPREG0_PBG;
1717        reg |= PE1_GPREG0_PDD3COLD;
1718        pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1719}
1720
1721static inline void
1722jme_phy_on(struct jme_adapter *jme)
1723{
1724        u32 bmcr;
1725
1726        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1727        bmcr &= ~BMCR_PDOWN;
1728        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1729
1730        if (new_phy_power_ctrl(jme->chip_main_rev))
1731                jme_new_phy_on(jme);
1732}
1733
1734static inline void
1735jme_phy_off(struct jme_adapter *jme)
1736{
1737        u32 bmcr;
1738
1739        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1740        bmcr |= BMCR_PDOWN;
1741        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1742
1743        if (new_phy_power_ctrl(jme->chip_main_rev))
1744                jme_new_phy_off(jme);
1745}
1746
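    /*
     * net_device open(): clear the power-management state, enable
     * NAPI and the deferred-work tasklets, hook up the interrupt,
     * power the PHY on and kick off link negotiation.
     */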
1747static int
1748jme_open(struct net_device *netdev)
1749{
1750        struct jme_adapter *jme = netdev_priv(netdev);
1751        int rc;
1752
1753        jme_clear_pm(jme);
1754        JME_NAPI_ENABLE(jme);
1755
1756        tasklet_enable(&jme->linkch_task);
1757        tasklet_enable(&jme->txclean_task);
1758        tasklet_hi_enable(&jme->rxclean_task);
1759        tasklet_hi_enable(&jme->rxempty_task);
1760
1761        rc = jme_request_irq(jme);
1762        if (rc)
1763                goto err_out;
1764
1765        jme_start_irq(jme);
1766
1767        jme_phy_on(jme);
1768        if (test_bit(JME_FLAG_SSET, &jme->flags))
1769                jme_set_settings(netdev, &jme->old_ecmd);
1770        else
1771                jme_reset_phy_processor(jme);
1772
1773        jme_reset_link(jme);
1774
1775        return 0;
1776
1777err_out:
1778        netif_stop_queue(netdev);
1779        netif_carrier_off(netdev);
1780        return rc;
1781}
1782
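    /*
     * Force the PHY and MAC to 100Mbps half duplex.  Used on the way
     * down to power-saving states, presumably so Wake-on-LAN link
     * sensing keeps working at a speed the sleeping device can hold.
     */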
1783static void
1784jme_set_100m_half(struct jme_adapter *jme)
1785{
1786        u32 bmcr, tmp;
1787
1788        jme_phy_on(jme);
1789        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1790        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1791                       BMCR_SPEED1000 | BMCR_FULLDPLX);
1792        tmp |= BMCR_SPEED100;
1793
1794        if (bmcr != tmp)
1795                jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1796
1797        if (jme->fpgaver)
1798                jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1799        else
1800                jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1801}
1802
1803#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1804static void
1805jme_wait_link(struct jme_adapter *jme)
1806{
1807        u32 phylink, to = JME_WAIT_LINK_TIME;
1808
1809        mdelay(1000);
1810        phylink = jme_linkstat_from_phy(jme);
1811        while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1812                mdelay(10);
1813                phylink = jme_linkstat_from_phy(jme);
1814        }
1815}
1816
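    /*
     * Drop the PHY into the lowest power state that still satisfies
     * the armed wake-up events: keep a 100M half-duplex link alive
     * when any wake source is enabled, otherwise power the PHY off.
     */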
1817static void
1818jme_powersave_phy(struct jme_adapter *jme)
1819{
1820        if (jme->reg_pmcs) {
1821                jme_set_100m_half(jme);
1822                if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1823                        jme_wait_link(jme);
1824                jme_clear_pm(jme);
1825        } else {
1826                jme_phy_off(jme);
1827        }
1828}
1829
1830static int
1831jme_close(struct net_device *netdev)
1832{
1833        struct jme_adapter *jme = netdev_priv(netdev);
1834
1835        netif_stop_queue(netdev);
1836        netif_carrier_off(netdev);
1837
1838        jme_stop_irq(jme);
1839        jme_free_irq(jme);
1840
1841        JME_NAPI_DISABLE(jme);
1842
1843        tasklet_disable(&jme->linkch_task);
1844        tasklet_disable(&jme->txclean_task);
1845        tasklet_disable(&jme->rxclean_task);
1846        tasklet_disable(&jme->rxempty_task);
1847
1848        jme_disable_rx_engine(jme);
1849        jme_disable_tx_engine(jme);
1850        jme_reset_mac_processor(jme);
1851        jme_free_rx_resources(jme);
1852        jme_free_tx_resources(jme);
1853        jme->phylink = 0;
1854        jme_phy_off(jme);
1855
1856        return 0;
1857}
1858
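    /*
     * Reserve ring entries for one skb: a header descriptor, one for
     * the linear data, plus one per page fragment (nr_frags + 2).
     * Returns the start index, or -1 if the ring is too full.
     */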
1859static int
1860jme_alloc_txdesc(struct jme_adapter *jme,
1861                        struct sk_buff *skb)
1862{
1863        struct jme_ring *txring = &(jme->txring[0]);
1864        int idx, nr_alloc, mask = jme->tx_ring_mask;
1865
1866        idx = txring->next_to_use;
1867        nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1868
1869        if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1870                return -1;
1871
1872        atomic_sub(nr_alloc, &txring->nr_free);
1873
1874        txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1875
1876        return idx;
1877}
1878
1879static void
1880jme_fill_tx_map(struct pci_dev *pdev,
1881                struct txdesc *txdesc,
1882                struct jme_buffer_info *txbi,
1883                struct page *page,
1884                u32 page_offset,
1885                u32 len,
1886                u8 hidma)
1887{
1888        dma_addr_t dmaaddr;
1889
1890        dmaaddr = pci_map_page(pdev,
1891                                page,
1892                                page_offset,
1893                                len,
1894                                PCI_DMA_TODEVICE);
1895
1896        pci_dma_sync_single_for_device(pdev,
1897                                       dmaaddr,
1898                                       len,
1899                                       PCI_DMA_TODEVICE);
1900
1901        txdesc->dw[0] = 0;
1902        txdesc->dw[1] = 0;
1903        txdesc->desc2.flags     = TXFLAG_OWN;
1904        txdesc->desc2.flags     |= (hidma) ? TXFLAG_64BIT : 0;
1905        txdesc->desc2.datalen   = cpu_to_le16(len);
1906        txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
1907        txdesc->desc2.bufaddrl  = cpu_to_le32(
1908                                        (__u64)dmaaddr & 0xFFFFFFFFUL);
1909
1910        txbi->mapping = dmaaddr;
1911        txbi->len = len;
1912}
1913
1914static void
1915jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
1916{
1917        struct jme_ring *txring = &(jme->txring[0]);
1918        struct txdesc *txdesc = txring->desc, *ctxdesc;
1919        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
1920        u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
1921        int i, nr_frags = skb_shinfo(skb)->nr_frags;
1922        int mask = jme->tx_ring_mask;
1923        struct skb_frag_struct *frag;
1924        u32 len;
1925
1926        for (i = 0 ; i < nr_frags ; ++i) {
1927                frag = &skb_shinfo(skb)->frags[i];
1928                ctxdesc = txdesc + ((idx + i + 2) & (mask));
1929                ctxbi = txbi + ((idx + i + 2) & (mask));
1930
1931                jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
1932                                 frag->page_offset, frag->size, hidma);
1933        }
1934
1935        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1936        ctxdesc = txdesc + ((idx + 1) & (mask));
1937        ctxbi = txbi + ((idx + 1) & (mask));
1938        jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
1939                        offset_in_page(skb->data), len, hidma);
1940
1941}
1942
1943static int
1944jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1945{
1946        if (unlikely(skb_shinfo(skb)->gso_size &&
1947                        skb_header_cloned(skb) &&
1948                        pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1949                dev_kfree_skb(skb);
1950                return -1;
1951        }
1952
1953        return 0;
1954}
1955
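    /*
     * Set up TCP segmentation offload.  The TCP checksum field is
     * seeded with the pseudo-header checksum (length omitted), as
     * TSO engines generally expect.  Returns 0 when TSO is in use,
     * nonzero when the caller should fall back to checksum offload.
     */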
1956static int
1957jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
1958{
1959        *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
1960        if (*mss) {
1961                *flags |= TXFLAG_LSEN;
1962
1963                if (skb->protocol == htons(ETH_P_IP)) {
1964                        struct iphdr *iph = ip_hdr(skb);
1965
1966                        iph->check = 0;
1967                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1968                                                                iph->daddr, 0,
1969                                                                IPPROTO_TCP,
1970                                                                0);
1971                } else {
1972                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
1973
1974                        tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
1975                                                                &ip6h->daddr, 0,
1976                                                                IPPROTO_TCP,
1977                                                                0);
1978                }
1979
1980                return 0;
1981        }
1982
1983        return 1;
1984}
1985
1986static void
1987jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1988{
1989        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1990                u8 ip_proto;
1991
1992                switch (skb->protocol) {
1993                case htons(ETH_P_IP):
1994                        ip_proto = ip_hdr(skb)->protocol;
1995                        break;
1996                case htons(ETH_P_IPV6):
1997                        ip_proto = ipv6_hdr(skb)->nexthdr;
1998                        break;
1999                default:
2000                        ip_proto = 0;
2001                        break;
2002                }
2003
2004                switch (ip_proto) {
2005                case IPPROTO_TCP:
2006                        *flags |= TXFLAG_TCPCS;
2007                        break;
2008                case IPPROTO_UDP:
2009                        *flags |= TXFLAG_UDPCS;
2010                        break;
2011                default:
2012                        netif_err(jme, tx_err, jme->dev, "Unsupported upper-layer protocol\n");
2013                        break;
2014                }
2015        }
2016}
2017
2018static inline void
2019jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2020{
2021        if (vlan_tx_tag_present(skb)) {
2022                *flags |= TXFLAG_TAGON;
2023                *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2024        }
2025}
2026
2027static int
2028jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2029{
2030        struct jme_ring *txring = &(jme->txring[0]);
2031        struct txdesc *txdesc;
2032        struct jme_buffer_info *txbi;
2033        u8 flags;
2034
2035        txdesc = (struct txdesc *)txring->desc + idx;
2036        txbi = txring->bufinf + idx;
2037
2038        txdesc->dw[0] = 0;
2039        txdesc->dw[1] = 0;
2040        txdesc->dw[2] = 0;
2041        txdesc->dw[3] = 0;
2042        txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2043        /*
2044         * Set the OWN bit last.  The kernel may queue packets faster
2045         * than the NIC can send them, and the NIC must not start on
2046         * this descriptor before we tell it to process this TX queue;
2047         * by then all other fields have already been filled in
2048         * correctly.
2049         */
2050        wmb();
2051        flags = TXFLAG_OWN | TXFLAG_INT;
2052        /*
2053         * Set checksum-offload flags when not doing TSO
2054         */
2055        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2056                jme_tx_csum(jme, skb, &flags);
2057        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2058        jme_map_tx_skb(jme, skb, idx);
2059        txdesc->desc1.flags = flags;
2060        /*
2061         * Set the TX buffer info only after handing the descriptor to
2062         * the NIC, for better tx_clean timing
2063         */
2064        wmb();
2065        txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2066        txbi->skb = skb;
2067        txbi->len = skb->len;
2068        txbi->start_xmit = jiffies;
2069        if (!txbi->start_xmit)             /* 0 means "not in flight", */
2070                txbi->start_xmit = ~0UL;   /* so never store jiffies == 0 */
2071
2072        return 0;
2073}
2074
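    /*
     * TX queue flow control: stop the queue when fewer descriptors
     * remain than a maximally fragmented skb could need, wake it once
     * enough are reclaimed, and stop it again if the oldest in-flight
     * packet has been pending longer than TX_TIMEOUT.
     */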
2075static void
2076jme_stop_queue_if_full(struct jme_adapter *jme)
2077{
2078        struct jme_ring *txring = &(jme->txring[0]);
2079        struct jme_buffer_info *txbi = txring->bufinf;
2080        int idx = atomic_read(&txring->next_to_clean);
2081
2082        txbi += idx;
2083
2084        smp_wmb();
2085        if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2086                netif_stop_queue(jme->dev);
2087                netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2088                smp_wmb();
2089                if (atomic_read(&txring->nr_free)
2090                        >= (jme->tx_wake_threshold)) {
2091                        netif_wake_queue(jme->dev);
2092                        netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
2093                }
2094        }
2095
2096        if (unlikely(txbi->start_xmit &&
2097                        (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
2098                        txbi->skb)) {
2099                netif_stop_queue(jme->dev);
2100                netif_info(jme, tx_queued, jme->dev,
2101                           "TX Queue Stopped %d@%lu\n", idx, jiffies);
2102        }
2103}
2104
2105/*
2106 * This function is already protected by netif_tx_lock()
2107 */
2108
2109static netdev_tx_t
2110jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2111{
2112        struct jme_adapter *jme = netdev_priv(netdev);
2113        int idx;
2114
2115        if (unlikely(jme_expand_header(jme, skb))) {
2116                ++(NET_STAT(jme).tx_dropped);
2117                return NETDEV_TX_OK;
2118        }
2119
2120        idx = jme_alloc_txdesc(jme, skb);
2121
2122        if (unlikely(idx < 0)) {
2123                netif_stop_queue(netdev);
2124                netif_err(jme, tx_err, jme->dev,
2125                          "BUG! Tx ring full when queue awake!\n");
2126
2127                return NETDEV_TX_BUSY;
2128        }
2129
2130        jme_fill_tx_desc(jme, skb, idx);
2131
2132        jwrite32(jme, JME_TXCS, jme->reg_txcs |
2133                                TXCS_SELECT_QUEUE0 |
2134                                TXCS_QUEUE0S |
2135                                TXCS_ENABLE);
2136
2137        tx_dbg(jme, "xmit: %d+%d@%lu\n",
2138               idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2139        jme_stop_queue_if_full(jme);
2140
2141        return NETDEV_TX_OK;
2142}
2143
2144static void
2145jme_set_unicastaddr(struct net_device *netdev)
2146{
2147        struct jme_adapter *jme = netdev_priv(netdev);
2148        u32 val;
2149
2150        val = (netdev->dev_addr[3] & 0xff) << 24 |
2151              (netdev->dev_addr[2] & 0xff) << 16 |
2152              (netdev->dev_addr[1] & 0xff) <<  8 |
2153              (netdev->dev_addr[0] & 0xff);
2154        jwrite32(jme, JME_RXUMA_LO, val);
2155        val = (netdev->dev_addr[5] & 0xff) << 8 |
2156              (netdev->dev_addr[4] & 0xff);
2157        jwrite32(jme, JME_RXUMA_HI, val);
2158}
2159
2160static int
2161jme_set_macaddr(struct net_device *netdev, void *p)
2162{
2163        struct jme_adapter *jme = netdev_priv(netdev);
2164        struct sockaddr *addr = p;
2165
2166        if (netif_running(netdev))
2167                return -EBUSY;
2168
2169        spin_lock_bh(&jme->macaddr_lock);
2170        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2171        jme_set_unicastaddr(netdev);
2172        spin_unlock_bh(&jme->macaddr_lock);
2173
2174        return 0;
2175}
2176
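    /*
     * Program the RX filters.  For a plain multicast list each
     * address is hashed with the Ethernet CRC to one of 64 bits
     * spread over the two 32-bit RXMCHT registers (bit 5 of the hash
     * picks the register, bits 0-4 pick the bit within it).
     */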
2177static void
2178jme_set_multi(struct net_device *netdev)
2179{
2180        struct jme_adapter *jme = netdev_priv(netdev);
2181        u32 mc_hash[2] = {};
2182
2183        spin_lock_bh(&jme->rxmcs_lock);
2184
2185        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2186
2187        if (netdev->flags & IFF_PROMISC) {
2188                jme->reg_rxmcs |= RXMCS_ALLFRAME;
2189        } else if (netdev->flags & IFF_ALLMULTI) {
2190                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2191        } else if (netdev->flags & IFF_MULTICAST) {
2192                struct netdev_hw_addr *ha;
2193                int bit_nr;
2194
2195                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2196                netdev_for_each_mc_addr(ha, netdev) {
2197                        bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2198                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2199                }
2200
2201                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2202                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
2203        }
2204
2205        wmb();
2206        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2207
2208        spin_unlock_bh(&jme->rxmcs_lock);
2209}
2210
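    /*
     * MTU changes also retune the RX FIFO threshold: MTUs above 4000
     * bytes use the 64QW setting, smaller ones 128QW, and the RX
     * engine is restarted either way before the link is reset.
     */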
2211static int
2212jme_change_mtu(struct net_device *netdev, int new_mtu)
2213{
2214        struct jme_adapter *jme = netdev_priv(netdev);
2215
2216        if (new_mtu == jme->old_mtu)
2217                return 0;
2218
2219        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2220                ((new_mtu) < IPV6_MIN_MTU))
2221                return -EINVAL;
2222
2223        if (new_mtu > 4000) {
2224                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2225                jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
2226                jme_restart_rx_engine(jme);
2227        } else {
2228                jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2229                jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
2230                jme_restart_rx_engine(jme);
2231        }
2232
2233        netdev->mtu = new_mtu;
2234        netdev_update_features(netdev);
2235
2236        jme_reset_link(jme);
2237
2238        return 0;
2239}
2240
2241static void
2242jme_tx_timeout(struct net_device *netdev)
2243{
2244        struct jme_adapter *jme = netdev_priv(netdev);
2245
2246        jme->phylink = 0;
2247        jme_reset_phy_processor(jme);
2248        if (test_bit(JME_FLAG_SSET, &jme->flags))
2249                jme_set_settings(netdev, &jme->old_ecmd);
2250
2251        /*
2252         * Force the link to reset again
2253         */
2254        jme_reset_link(jme);
2255}
2256
2257static inline void jme_pause_rx(struct jme_adapter *jme)
2258{
2259        atomic_dec(&jme->link_changing);
2260
2261        jme_set_rx_pcc(jme, PCC_OFF);
2262        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2263                JME_NAPI_DISABLE(jme);
2264        } else {
2265                tasklet_disable(&jme->rxclean_task);
2266                tasklet_disable(&jme->rxempty_task);
2267        }
2268}
2269
2270static inline void jme_resume_rx(struct jme_adapter *jme)
2271{
2272        struct dynpcc_info *dpi = &(jme->dpi);
2273
2274        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2275                JME_NAPI_ENABLE(jme);
2276        } else {
2277                tasklet_hi_enable(&jme->rxclean_task);
2278                tasklet_hi_enable(&jme->rxempty_task);
2279        }
2280        dpi->cur                = PCC_P1;
2281        dpi->attempt            = PCC_P1;
2282        dpi->cnt                = 0;
2283        jme_set_rx_pcc(jme, PCC_P1);
2284
2285        atomic_inc(&jme->link_changing);
2286}
2287
2288static void
2289jme_get_drvinfo(struct net_device *netdev,
2290                     struct ethtool_drvinfo *info)
2291{
2292        struct jme_adapter *jme = netdev_priv(netdev);
2293
2294        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2295        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2296        strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
2297}
2298
2299static int
2300jme_get_regs_len(struct net_device *netdev)
2301{
2302        return JME_REG_LEN;
2303}
2304
2305static void
2306mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2307{
2308        int i;
2309
2310        for (i = 0 ; i < len ; i += 4)
2311                p[i >> 2] = jread32(jme, reg + i);
2312}
2313
2314static void
2315mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2316{
2317        int i;
2318        u16 *p16 = (u16 *)p;
2319
2320        for (i = 0 ; i < reg_nr ; ++i)
2321                p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2322}
2323
2324static void
2325jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2326{
2327        struct jme_adapter *jme = netdev_priv(netdev);
2328        u32 *p32 = (u32 *)p;
2329
2330        memset(p, 0xFF, JME_REG_LEN);
2331
2332        regs->version = 1;
2333        mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2334
2335        p32 += 0x100 >> 2;
2336        mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2337
2338        p32 += 0x100 >> 2;
2339        mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2340
2341        p32 += 0x100 >> 2;
2342        mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2343
2344        p32 += 0x100 >> 2;
2345        mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2346}
2347
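    /*
     * Report interrupt coalescing: TX parameters are fixed, while RX
     * reports either no coalescing (NAPI polling mode) or the values
     * of whichever dynamic PCC profile (P1-P3) is currently active.
     */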
2348static int
2349jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2350{
2351        struct jme_adapter *jme = netdev_priv(netdev);
2352
2353        ecmd->tx_coalesce_usecs = PCC_TX_TO;
2354        ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2355
2356        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2357                ecmd->use_adaptive_rx_coalesce = false;
2358                ecmd->rx_coalesce_usecs = 0;
2359                ecmd->rx_max_coalesced_frames = 0;
2360                return 0;
2361        }
2362
2363        ecmd->use_adaptive_rx_coalesce = true;
2364
2365        switch (jme->dpi.cur) {
2366        case PCC_P1:
2367                ecmd->rx_coalesce_usecs = PCC_P1_TO;
2368                ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2369                break;
2370        case PCC_P2:
2371                ecmd->rx_coalesce_usecs = PCC_P2_TO;
2372                ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2373                break;
2374        case PCC_P3:
2375                ecmd->rx_coalesce_usecs = PCC_P3_TO;
2376                ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2377                break;
2378        default:
2379                break;
2380        }
2381
2382        return 0;
2383}
2384
2385static int
2386jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2387{
2388        struct jme_adapter *jme = netdev_priv(netdev);
2389        struct dynpcc_info *dpi = &(jme->dpi);
2390
2391        if (netif_running(netdev))
2392                return -EBUSY;
2393
2394        if (ecmd->use_adaptive_rx_coalesce &&
2395            test_bit(JME_FLAG_POLL, &jme->flags)) {
2396                clear_bit(JME_FLAG_POLL, &jme->flags);
2397                jme->jme_rx = netif_rx;
2398                dpi->cur                = PCC_P1;
2399                dpi->attempt            = PCC_P1;
2400                dpi->cnt                = 0;
2401                jme_set_rx_pcc(jme, PCC_P1);
2402                jme_interrupt_mode(jme);
2403        } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2404                   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2405                set_bit(JME_FLAG_POLL, &jme->flags);
2406                jme->jme_rx = netif_receive_skb;
2407                jme_interrupt_mode(jme);
2408        }
2409
2410        return 0;
2411}
2412
2413static void
2414jme_get_pauseparam(struct net_device *netdev,
2415                        struct ethtool_pauseparam *ecmd)
2416{
2417        struct jme_adapter *jme = netdev_priv(netdev);
2418        u32 val;
2419
2420        ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2421        ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2422
2423        spin_lock_bh(&jme->phy_lock);
2424        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2425        spin_unlock_bh(&jme->phy_lock);
2426
2427        ecmd->autoneg =
2428                (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2429}
2430
2431static int
2432jme_set_pauseparam(struct net_device *netdev,
2433                        struct ethtool_pauseparam *ecmd)
2434{
2435        struct jme_adapter *jme = netdev_priv(netdev);
2436        u32 val;
2437
2438        if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2439                (ecmd->tx_pause != 0)) {
2440
2441                if (ecmd->tx_pause)
2442                        jme->reg_txpfc |= TXPFC_PF_EN;
2443                else
2444                        jme->reg_txpfc &= ~TXPFC_PF_EN;
2445
2446                jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2447        }
2448
2449        spin_lock_bh(&jme->rxmcs_lock);
2450        if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2451                (ecmd->rx_pause != 0)) {
2452
2453                if (ecmd->rx_pause)
2454                        jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2455                else
2456                        jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2457
2458                jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2459        }
2460        spin_unlock_bh(&jme->rxmcs_lock);
2461
2462        spin_lock_bh(&jme->phy_lock);
2463        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2464        if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2465                (ecmd->autoneg != 0)) {
2466
2467                if (ecmd->autoneg)
2468                        val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2469                else
2470                        val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2471
2472                jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2473                                MII_ADVERTISE, val);
2474        }
2475        spin_unlock_bh(&jme->phy_lock);
2476
2477        return 0;
2478}
2479
2480static void
2481jme_get_wol(struct net_device *netdev,
2482                struct ethtool_wolinfo *wol)
2483{
2484        struct jme_adapter *jme = netdev_priv(netdev);
2485
2486        wol->supported = WAKE_MAGIC | WAKE_PHY;
2487
2488        wol->wolopts = 0;
2489
2490        if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2491                wol->wolopts |= WAKE_PHY;
2492
2493        if (jme->reg_pmcs & PMCS_MFEN)
2494                wol->wolopts |= WAKE_MAGIC;
2495
2496}
2497
2498static int
2499jme_set_wol(struct net_device *netdev,
2500                struct ethtool_wolinfo *wol)
2501{
2502        struct jme_adapter *jme = netdev_priv(netdev);
2503
2504        if (wol->wolopts & (WAKE_MAGICSECURE |
2505                                WAKE_UCAST |
2506                                WAKE_MCAST |
2507                                WAKE_BCAST |
2508                                WAKE_ARP))
2509                return -EOPNOTSUPP;
2510
2511        jme->reg_pmcs = 0;
2512
2513        if (wol->wolopts & WAKE_PHY)
2514                jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2515
2516        if (wol->wolopts & WAKE_MAGIC)
2517                jme->reg_pmcs |= PMCS_MFEN;
2518
2519        jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2520        device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2521
2522        return 0;
2523}
2524
2525static int
2526jme_get_settings(struct net_device *netdev,
2527                     struct ethtool_cmd *ecmd)
2528{
2529        struct jme_adapter *jme = netdev_priv(netdev);
2530        int rc;
2531
2532        spin_lock_bh(&jme->phy_lock);
2533        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2534        spin_unlock_bh(&jme->phy_lock);
2535        return rc;
2536}
2537
2538static int
2539jme_set_settings(struct net_device *netdev,
2540                     struct ethtool_cmd *ecmd)
2541{
2542        struct jme_adapter *jme = netdev_priv(netdev);
2543        int rc, fdc = 0;
2544
2545        if (ethtool_cmd_speed(ecmd) == SPEED_1000
2546            && ecmd->autoneg != AUTONEG_ENABLE)
2547                return -EINVAL;
2548
2549        /*
2550         * Check if the user changed only the duplex while force_media is
2551         * set; the hardware would not generate a link change interrupt.
2552         */
2553        if (jme->mii_if.force_media &&
2554            ecmd->autoneg != AUTONEG_ENABLE &&
2555            (jme->mii_if.full_duplex != ecmd->duplex))
2556                fdc = 1;
2557
2558        spin_lock_bh(&jme->phy_lock);
2559        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2560        spin_unlock_bh(&jme->phy_lock);
2561
2562        if (!rc) {
2563                if (fdc)
2564                        jme_reset_link(jme);
2565                jme->old_ecmd = *ecmd;
2566                set_bit(JME_FLAG_SSET, &jme->flags);
2567        }
2568
2569        return rc;
2570}
2571
2572static int
2573jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2574{
2575        int rc;
2576        struct jme_adapter *jme = netdev_priv(netdev);
2577        struct mii_ioctl_data *mii_data = if_mii(rq);
2578        unsigned int duplex_chg;
2579
2580        if (cmd == SIOCSMIIREG) {
2581                u16 val = mii_data->val_in;
2582                if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2583                    (val & BMCR_SPEED1000))
2584                        return -EINVAL;
2585        }
2586
2587        spin_lock_bh(&jme->phy_lock);
2588        rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2589        spin_unlock_bh(&jme->phy_lock);
2590
2591        if (!rc && (cmd == SIOCSMIIREG)) {
2592                if (duplex_chg)
2593                        jme_reset_link(jme);
2594                jme_get_settings(netdev, &jme->old_ecmd);
2595                set_bit(JME_FLAG_SSET, &jme->flags);
2596        }
2597
2598        return rc;
2599}
2600
2601static u32
2602jme_get_link(struct net_device *netdev)
2603{
2604        struct jme_adapter *jme = netdev_priv(netdev);
2605        return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2606}
2607
2608static u32
2609jme_get_msglevel(struct net_device *netdev)
2610{
2611        struct jme_adapter *jme = netdev_priv(netdev);
2612        return jme->msg_enable;
2613}
2614
2615static void
2616jme_set_msglevel(struct net_device *netdev, u32 value)
2617{
2618        struct jme_adapter *jme = netdev_priv(netdev);
2619        jme->msg_enable = value;
2620}
2621
2622static u32
2623jme_fix_features(struct net_device *netdev, u32 features)
2624{
2625        if (netdev->mtu > 1900)
2626                features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
2627        return features;
2628}
2629
2630static int
2631jme_set_features(struct net_device *netdev, u32 features)
2632{
2633        struct jme_adapter *jme = netdev_priv(netdev);
2634
2635        spin_lock_bh(&jme->rxmcs_lock);
2636        if (features & NETIF_F_RXCSUM)
2637                jme->reg_rxmcs |= RXMCS_CHECKSUM;
2638        else
2639                jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2640        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2641        spin_unlock_bh(&jme->rxmcs_lock);
2642
2643        return 0;
2644}
2645
2646static int
2647jme_nway_reset(struct net_device *netdev)
2648{
2649        struct jme_adapter *jme = netdev_priv(netdev);
2650        jme_restart_an(jme);
2651        return 0;
2652}
2653
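    /*
     * EEPROM bytes are accessed through the chip's SMBus interface:
     * wait for the bus to go idle, issue a one-byte command through
     * JME_SMBINTF, then poll for completion.  A timeout is logged,
     * and a timed-out read returns 0xFF.
     */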
2654static u8
2655jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2656{
2657        u32 val;
2658        int to;
2659
2660        val = jread32(jme, JME_SMBCSR);
2661        to = JME_SMB_BUSY_TIMEOUT;
2662        while ((val & SMBCSR_BUSY) && --to) {
2663                msleep(1);
2664                val = jread32(jme, JME_SMBCSR);
2665        }
2666        if (!to) {
2667                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2668                return 0xFF;
2669        }
2670
2671        jwrite32(jme, JME_SMBINTF,
2672                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2673                SMBINTF_HWRWN_READ |
2674                SMBINTF_HWCMD);
2675
2676        val = jread32(jme, JME_SMBINTF);
2677        to = JME_SMB_BUSY_TIMEOUT;
2678        while ((val & SMBINTF_HWCMD) && --to) {
2679                msleep(1);
2680                val = jread32(jme, JME_SMBINTF);
2681        }
2682        if (!to) {
2683                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2684                return 0xFF;
2685        }
2686
2687        return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2688}
2689
2690static void
2691jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2692{
2693        u32 val;
2694        int to;
2695
2696        val = jread32(jme, JME_SMBCSR);
2697        to = JME_SMB_BUSY_TIMEOUT;
2698        while ((val & SMBCSR_BUSY) && --to) {
2699                msleep(1);
2700                val = jread32(jme, JME_SMBCSR);
2701        }
2702        if (!to) {
2703                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2704                return;
2705        }
2706
2707        jwrite32(jme, JME_SMBINTF,
2708                ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2709                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2710                SMBINTF_HWRWN_WRITE |
2711                SMBINTF_HWCMD);
2712
2713        val = jread32(jme, JME_SMBINTF);
2714        to = JME_SMB_BUSY_TIMEOUT;
2715        while ((val & SMBINTF_HWCMD) && --to) {
2716                msleep(1);
2717                val = jread32(jme, JME_SMBINTF);
2718        }
2719        if (!to) {
2720                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2721                return;
2722        }
2723
2724        mdelay(2);
2725}
2726
2727static int
2728jme_get_eeprom_len(struct net_device *netdev)
2729{
2730        struct jme_adapter *jme = netdev_priv(netdev);
2731        u32 val;
2732        val = jread32(jme, JME_SMBCSR);
2733        return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2734}
2735
2736static int
2737jme_get_eeprom(struct net_device *netdev,
2738                struct ethtool_eeprom *eeprom, u8 *data)
2739{
2740        struct jme_adapter *jme = netdev_priv(netdev);
2741        int i, offset = eeprom->offset, len = eeprom->len;
2742
2743        /*
2744         * ethtool will check the boundary for us
2745         */
2746        eeprom->magic = JME_EEPROM_MAGIC;
2747        for (i = 0 ; i < len ; ++i)
2748                data[i] = jme_smb_read(jme, i + offset);
2749
2750        return 0;
2751}
2752
2753static int
2754jme_set_eeprom(struct net_device *netdev,
2755                struct ethtool_eeprom *eeprom, u8 *data)
2756{
2757        struct jme_adapter *jme = netdev_priv(netdev);
2758        int i, offset = eeprom->offset, len = eeprom->len;
2759
2760        if (eeprom->magic != JME_EEPROM_MAGIC)
2761                return -EINVAL;
2762
2763        /*
2764         * ethtool will check the boundary for us
2765         */
2766        for (i = 0 ; i < len ; ++i)
2767                jme_smb_write(jme, i + offset, data[i]);
2768
2769        return 0;
2770}
2771
2772static const struct ethtool_ops jme_ethtool_ops = {
2773        .get_drvinfo            = jme_get_drvinfo,
2774        .get_regs_len           = jme_get_regs_len,
2775        .get_regs               = jme_get_regs,
2776        .get_coalesce           = jme_get_coalesce,
2777        .set_coalesce           = jme_set_coalesce,
2778        .get_pauseparam         = jme_get_pauseparam,
2779        .set_pauseparam         = jme_set_pauseparam,
2780        .get_wol                = jme_get_wol,
2781        .set_wol                = jme_set_wol,
2782        .get_settings           = jme_get_settings,
2783        .set_settings           = jme_set_settings,
2784        .get_link               = jme_get_link,
2785        .get_msglevel           = jme_get_msglevel,
2786        .set_msglevel           = jme_set_msglevel,
2787        .nway_reset             = jme_nway_reset,
2788        .get_eeprom_len         = jme_get_eeprom_len,
2789        .get_eeprom             = jme_get_eeprom,
2790        .set_eeprom             = jme_set_eeprom,
2791};
2792
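    /*
     * Negotiate the widest usable DMA mask.  Returns 1 when 64-bit
     * or 40-bit DMA was set (the caller then enables NETIF_F_HIGHDMA),
     * 0 for plain 32-bit DMA, and -1 when no mask could be set.
     */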
2793static int
2794jme_pci_dma64(struct pci_dev *pdev)
2795{
2796        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2797            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
2798                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2799                        return 1;
2800
2801        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2802            !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2803                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
2804                        return 1;
2805
2806        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
2807                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2808                        return 0;
2809
2810        return -1;
2811}
2812
2813static inline void
2814jme_phy_init(struct jme_adapter *jme)
2815{
2816        u16 reg26;
2817
2818        reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2819        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2820}
2821
2822static inline void
2823jme_check_hw_ver(struct jme_adapter *jme)
2824{
2825        u32 chipmode;
2826
2827        chipmode = jread32(jme, JME_CHIPMODE);
2828
2829        jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2830        jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2831        jme->chip_main_rev = jme->chiprev & 0xF;
2832        jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2833}
2834
2835static const struct net_device_ops jme_netdev_ops = {
2836        .ndo_open               = jme_open,
2837        .ndo_stop               = jme_close,
2838        .ndo_validate_addr      = eth_validate_addr,
2839        .ndo_do_ioctl           = jme_ioctl,
2840        .ndo_start_xmit         = jme_start_xmit,
2841        .ndo_set_mac_address    = jme_set_macaddr,
2842        .ndo_set_multicast_list = jme_set_multi,
2843        .ndo_change_mtu         = jme_change_mtu,
2844        .ndo_tx_timeout         = jme_tx_timeout,
2845        .ndo_fix_features       = jme_fix_features,
2846        .ndo_set_features       = jme_set_features,
2847};
2848
2849static int __devinit
2850jme_init_one(struct pci_dev *pdev,
2851             const struct pci_device_id *ent)
2852{
2853        int rc = 0, using_dac, i;
2854        struct net_device *netdev;
2855        struct jme_adapter *jme;
2856        u16 bmcr, bmsr;
2857        u32 apmc;
2858
2859        /*
2860         * set up PCI device basics
2861         */
2862        rc = pci_enable_device(pdev);
2863        if (rc) {
2864                pr_err("Cannot enable PCI device\n");
2865                goto err_out;
2866        }
2867
2868        using_dac = jme_pci_dma64(pdev);
2869        if (using_dac < 0) {
2870                pr_err("Cannot set PCI DMA Mask\n");
2871                rc = -EIO;
2872                goto err_out_disable_pdev;
2873        }
2874
2875        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2876                pr_err("No PCI resource region found\n");
2877                rc = -ENOMEM;
2878                goto err_out_disable_pdev;
2879        }
2880
2881        rc = pci_request_regions(pdev, DRV_NAME);
2882        if (rc) {
2883                pr_err("Cannot obtain PCI resource region\n");
2884                goto err_out_disable_pdev;
2885        }
2886
2887        pci_set_master(pdev);
2888
2889        /*
2890         * alloc and init net device
2891         */
2892        netdev = alloc_etherdev(sizeof(*jme));
2893        if (!netdev) {
2894                pr_err("Cannot allocate netdev structure\n");
2895                rc = -ENOMEM;
2896                goto err_out_release_regions;
2897        }
2898        netdev->netdev_ops = &jme_netdev_ops;
2899        netdev->ethtool_ops             = &jme_ethtool_ops;
2900        netdev->watchdog_timeo          = TX_TIMEOUT;
2901        netdev->hw_features             =       NETIF_F_IP_CSUM |
2902                                                NETIF_F_IPV6_CSUM |
2903                                                NETIF_F_SG |
2904                                                NETIF_F_TSO |
2905                                                NETIF_F_TSO6 |
2906                                                NETIF_F_RXCSUM;
2907        netdev->features                =       NETIF_F_IP_CSUM |
2908                                                NETIF_F_IPV6_CSUM |
2909                                                NETIF_F_SG |
2910                                                NETIF_F_TSO |
2911                                                NETIF_F_TSO6 |
2912                                                NETIF_F_HW_VLAN_TX |
2913                                                NETIF_F_HW_VLAN_RX;
2914        if (using_dac)
2915                netdev->features        |=      NETIF_F_HIGHDMA;
2916
2917        SET_NETDEV_DEV(netdev, &pdev->dev);
2918        pci_set_drvdata(pdev, netdev);
2919
2920        /*
2921         * init adapter info
2922         */
2923        jme = netdev_priv(netdev);
2924        jme->pdev = pdev;
2925        jme->dev = netdev;
2926        jme->jme_rx = netif_rx;
2927        jme->old_mtu = netdev->mtu = 1500;
2928        jme->phylink = 0;
2929        jme->tx_ring_size = 1 << 10;
2930        jme->tx_ring_mask = jme->tx_ring_size - 1;
2931        jme->tx_wake_threshold = 1 << 9;
2932        jme->rx_ring_size = 1 << 9;
2933        jme->rx_ring_mask = jme->rx_ring_size - 1;
2934        jme->msg_enable = JME_DEF_MSG_ENABLE;
2935        jme->regs = ioremap(pci_resource_start(pdev, 0),
2936                             pci_resource_len(pdev, 0));
2937        if (!(jme->regs)) {
2938                pr_err("Mapping PCI resource region error\n");
2939                rc = -ENOMEM;
2940                goto err_out_free_netdev;
2941        }
2942
2943        if (no_pseudohp) {
2944                apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
2945                jwrite32(jme, JME_APMC, apmc);
2946        } else if (force_pseudohp) {
2947                apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
2948                jwrite32(jme, JME_APMC, apmc);
2949        }
2950
2951        NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2952
2953        spin_lock_init(&jme->phy_lock);
2954        spin_lock_init(&jme->macaddr_lock);
2955        spin_lock_init(&jme->rxmcs_lock);
2956
2957        atomic_set(&jme->link_changing, 1);
2958        atomic_set(&jme->rx_cleaning, 1);
2959        atomic_set(&jme->tx_cleaning, 1);
2960        atomic_set(&jme->rx_empty, 1);
2961
2962        tasklet_init(&jme->pcc_task,
2963                     jme_pcc_tasklet,
2964                     (unsigned long) jme);
2965        tasklet_init(&jme->linkch_task,
2966                     jme_link_change_tasklet,
2967                     (unsigned long) jme);
2968        tasklet_init(&jme->txclean_task,
2969                     jme_tx_clean_tasklet,
2970                     (unsigned long) jme);
2971        tasklet_init(&jme->rxclean_task,
2972                     jme_rx_clean_tasklet,
2973                     (unsigned long) jme);
2974        tasklet_init(&jme->rxempty_task,
2975                     jme_rx_empty_tasklet,
2976                     (unsigned long) jme);
2977        tasklet_disable_nosync(&jme->linkch_task);
2978        tasklet_disable_nosync(&jme->txclean_task);
2979        tasklet_disable_nosync(&jme->rxclean_task);
2980        tasklet_disable_nosync(&jme->rxempty_task);
2981        jme->dpi.cur = PCC_P1;
2982
2983        jme->reg_ghc = 0;
2984        jme->reg_rxcs = RXCS_DEFAULT;
2985        jme->reg_rxmcs = RXMCS_DEFAULT;
2986        jme->reg_txpfc = 0;
2987        jme->reg_pmcs = PMCS_MFEN;
2988        jme->reg_gpreg1 = GPREG1_DEFAULT;
2989
2990        if (jme->reg_rxmcs & RXMCS_CHECKSUM)
2991                netdev->features |= NETIF_F_RXCSUM;
2992
2993        /*
2994         * Get Max Read Req Size from PCI Config Space
2995         */
2996        pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
2997        jme->mrrs &= PCI_DCSR_MRRS_MASK;
2998        switch (jme->mrrs) {
2999        case MRRS_128B:
3000                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
3001                break;
3002        case MRRS_256B:
3003                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
3004                break;
3005        default:
3006                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
3007                break;
3008        }
3009
3010        /*
3011         * Must check before reset_mac_processor
3012         */
3013        jme_check_hw_ver(jme);
3014        jme->mii_if.dev = netdev;
3015        if (jme->fpgaver) {
3016                jme->mii_if.phy_id = 0;
3017                for (i = 1 ; i < 32 ; ++i) {
3018                        bmcr = jme_mdio_read(netdev, i, MII_BMCR);
3019                        bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3020                        if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3021                                jme->mii_if.phy_id = i;
3022                                break;
3023                        }
3024                }
3025
3026                if (!jme->mii_if.phy_id) {
3027                        rc = -EIO;
3028                        pr_err("Can not find phy_id\n");
3029                        goto err_out_unmap;
3030                }
3031
3032                jme->reg_ghc |= GHC_LINK_POLL;
3033        } else {
3034                jme->mii_if.phy_id = 1;
3035        }
3036        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
3037                jme->mii_if.supports_gmii = true;
3038        else
3039                jme->mii_if.supports_gmii = false;
3040        jme->mii_if.phy_id_mask = 0x1F;
3041        jme->mii_if.reg_num_mask = 0x1F;
3042        jme->mii_if.mdio_read = jme_mdio_read;
3043        jme->mii_if.mdio_write = jme_mdio_write;
3044
3045        jme_clear_pm(jme);
3046        pci_set_power_state(jme->pdev, PCI_D0);
3047        device_set_wakeup_enable(&pdev->dev, true);
3048
3049        jme_set_phyfifo_5level(jme);
3050        jme->pcirev = pdev->revision;
3051        if (!jme->fpgaver)
3052                jme_phy_init(jme);
3053        jme_phy_off(jme);
3054
3055        /*
3056         * Reset MAC processor and reload EEPROM for MAC Address
3057         */
3058        jme_reset_mac_processor(jme);
3059        rc = jme_reload_eeprom(jme);
3060        if (rc) {
3061                pr_err("Reload eeprom for reading MAC Address error\n");
3062                goto err_out_unmap;
3063        }
3064        jme_load_macaddr(netdev);
3065
3066        /*
3067         * Tell stack that we are not ready to work until open()
3068         */
3069        netif_carrier_off(netdev);
3070
3071        rc = register_netdev(netdev);
3072        if (rc) {
3073                pr_err("Cannot register net device\n");
3074                goto err_out_unmap;
3075        }
3076
3077        netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3078                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
3079                   "JMC250 Gigabit Ethernet" :
3080                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
3081                   "JMC260 Fast Ethernet" : "Unknown",
3082                   (jme->fpgaver != 0) ? " (FPGA)" : "",
3083                   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
3084                   jme->pcirev, netdev->dev_addr);
3085
3086        return 0;
3087
3088err_out_unmap:
3089        iounmap(jme->regs);
3090err_out_free_netdev:
3091        pci_set_drvdata(pdev, NULL);
3092        free_netdev(netdev);
3093err_out_release_regions:
3094        pci_release_regions(pdev);
3095err_out_disable_pdev:
3096        pci_disable_device(pdev);
3097err_out:
3098        return rc;
3099}
3100
3101static void __devexit
3102jme_remove_one(struct pci_dev *pdev)
3103{
3104        struct net_device *netdev = pci_get_drvdata(pdev);
3105        struct jme_adapter *jme = netdev_priv(netdev);
3106
3107        unregister_netdev(netdev);
3108        iounmap(jme->regs);
3109        pci_set_drvdata(pdev, NULL);
3110        free_netdev(netdev);
3111        pci_release_regions(pdev);
3112        pci_disable_device(pdev);
3113
3114}
3115
3116static void
3117jme_shutdown(struct pci_dev *pdev)
3118{
3119        struct net_device *netdev = pci_get_drvdata(pdev);
3120        struct jme_adapter *jme = netdev_priv(netdev);
3121
3122        jme_powersave_phy(jme);
3123        pci_pme_active(pdev, true);
3124}
3125
3126#ifdef CONFIG_PM_SLEEP
3127static int
3128jme_suspend(struct device *dev)
3129{
3130        struct pci_dev *pdev = to_pci_dev(dev);
3131        struct net_device *netdev = pci_get_drvdata(pdev);
3132        struct jme_adapter *jme = netdev_priv(netdev);
3133
3134        atomic_dec(&jme->link_changing);
3135
3136        netif_device_detach(netdev);
3137        netif_stop_queue(netdev);
3138        jme_stop_irq(jme);
3139
3140        tasklet_disable(&jme->txclean_task);
3141        tasklet_disable(&jme->rxclean_task);
3142        tasklet_disable(&jme->rxempty_task);
3143
3144        if (netif_carrier_ok(netdev)) {
3145                if (test_bit(JME_FLAG_POLL, &jme->flags))
3146                        jme_polling_mode(jme);
3147
3148                jme_stop_pcc_timer(jme);
3149                jme_disable_rx_engine(jme);
3150                jme_disable_tx_engine(jme);
3151                jme_reset_mac_processor(jme);
3152                jme_free_rx_resources(jme);
3153                jme_free_tx_resources(jme);
3154                netif_carrier_off(netdev);
3155                jme->phylink = 0;
3156        }
3157
3158        tasklet_enable(&jme->txclean_task);
3159        tasklet_hi_enable(&jme->rxclean_task);
3160        tasklet_hi_enable(&jme->rxempty_task);
3161
3162        jme_powersave_phy(jme);
3163
3164        return 0;
3165}
3166
3167static int
3168jme_resume(struct device *dev)
3169{
3170        struct pci_dev *pdev = to_pci_dev(dev);
3171        struct net_device *netdev = pci_get_drvdata(pdev);
3172        struct jme_adapter *jme = netdev_priv(netdev);
3173
3174        jme_clear_pm(jme);
3175        jme_phy_on(jme);
3176        if (test_bit(JME_FLAG_SSET, &jme->flags))
3177                jme_set_settings(netdev, &jme->old_ecmd);
3178        else
3179                jme_reset_phy_processor(jme);
3180
3181        jme_start_irq(jme);
3182        netif_device_attach(netdev);
3183
3184        atomic_inc(&jme->link_changing);
3185
3186        jme_reset_link(jme);
3187
3188        return 0;
3189}
3190
3191static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3192#define JME_PM_OPS (&jme_pm_ops)
3193
3194#else
3195
3196#define JME_PM_OPS NULL
3197#endif
3198
3199static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
3200        { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
3201        { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3202        { }
3203};
3204
3205static struct pci_driver jme_driver = {
3206        .name           = DRV_NAME,
3207        .id_table       = jme_pci_tbl,
3208        .probe          = jme_init_one,
3209        .remove         = __devexit_p(jme_remove_one),
3210        .shutdown       = jme_shutdown,
3211        .driver.pm      = JME_PM_OPS,
3212};
3213
3214static int __init
3215jme_init_module(void)
3216{
3217        pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3218        return pci_register_driver(&jme_driver);
3219}
3220
3221static void __exit
3222jme_cleanup_module(void)
3223{
3224        pci_unregister_driver(&jme_driver);
3225}
3226
3227module_init(jme_init_module);
3228module_exit(jme_cleanup_module);
3229
3230MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
3231MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3232MODULE_LICENSE("GPL");
3233MODULE_VERSION(DRV_VERSION);
3234MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3235
3236