linux/drivers/net/ethernet/jme.c
/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
        "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
        "Do not use external plug signal for pseudo hot-plug.");

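/*
 * Editor's note (not in the original source): the three parameters above
 * are load-time switches.  A minimal usage sketch, assuming the module is
 * built and installed as "jme":
 *
 *        modprobe jme no_pseudohp=1
 *
 * would disable the pseudo hot-plug feature for all JMC2x0 devices.
 */

/*
 * MDIO/SMI access note (editor's summary): a PHY read is started by
 * writing SMI_OP_REQ together with the PHY and register addresses to
 * JME_SMI, then polling until the hardware clears SMI_OP_REQ.  MII_BMSR
 * is read twice (the "again" path below) because its link-status bit is
 * latched-low per IEEE 802.3; only the second read reflects the current
 * link state.
 */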
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                                smi_phy_addr(phy) |
                                smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0) {
                pr_err("phy(%d) read timeout : %d\n", phy, reg);
                return 0;
        }

        if (again--)
                goto read_again;

        return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
                                int phy, int reg, int val)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        int i;

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        wmb();
        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                udelay(20);
                if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
                        break;
        }

        if (i == 0)
                pr_err("phy(%d) write timeout : %d\n", phy, reg);
}

static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
        u32 val;

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_ADVERTISE, ADVERTISE_ALL |
                        ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
                jme_mdio_write(jme->dev,
                                jme->mii_if.phy_id,
                                MII_CTRL1000,
                                ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,
                                jme->mii_if.phy_id,
                                MII_BMCR);

        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR, val | BMCR_RESET);
}

static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
                       const u32 *mask, u32 crc, int fnr)
{
        int i;

        /*
         * Setup CRC pattern
         */
        jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
        wmb();
        jwrite32(jme, JME_WFODP, crc);
        wmb();

        /*
         * Setup Mask
         */
        for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
                jwrite32(jme, JME_WFOI,
                                ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                                (fnr & WFOI_FRAME_SEL));
                wmb();
                jwrite32(jme, JME_WFODP, mask[i]);
                wmb();
        }
}

static inline void
jme_mac_rxclk_off(struct jme_adapter *jme)
{
        jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_rxclk_on(struct jme_adapter *jme)
{
        jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_txclk_off(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_mac_txclk_on(struct jme_adapter *jme)
{
        u32 speed = jme->reg_ghc & GHC_SPEED;
        if (speed == GHC_SPEED_1000M)
                jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
        else
                jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_250A2_workaround(struct jme_adapter *jme)
{
        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                             GPREG1_RSSPATCH);
        jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_assert_ghc_reset(struct jme_adapter *jme)
{
        jme->reg_ghc |= GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_ghc_reset(struct jme_adapter *jme)
{
        jme->reg_ghc &= ~GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

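/*
 * Editor's note: the MAC reset below toggles the RX/TX MAC clocks around
 * GHC_SWRST -- clocks on, assert reset, clocks off, clear reset, then
 * pulse the clocks once more -- which appears intended to make sure the
 * soft reset is latched by both MAC processors before the rings and
 * filters are cleared.  The udelay(1) calls give the hardware time to
 * settle between steps.
 */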
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
        static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
        u32 crc = 0xCDCDCDCD;
        u32 gpreg0;
        int i;

        jme_reset_ghc_speed(jme);
        jme_reset_250A2_workaround(jme);

        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        udelay(1);
        jme_assert_ghc_reset(jme);
        udelay(1);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);
        udelay(1);
        jme_clear_ghc_reset(jme);
        udelay(1);
        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        udelay(1);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);

        jwrite32(jme, JME_RXDBA_LO, 0x00000000);
        jwrite32(jme, JME_RXDBA_HI, 0x00000000);
        jwrite32(jme, JME_RXQDC, 0x00000000);
        jwrite32(jme, JME_RXNDA, 0x00000000);
        jwrite32(jme, JME_TXDBA_LO, 0x00000000);
        jwrite32(jme, JME_TXDBA_HI, 0x00000000);
        jwrite32(jme, JME_TXQDC, 0x00000000);
        jwrite32(jme, JME_TXNDA, 0x00000000);

        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
                jme_setup_wakeup_frame(jme, mask, crc, i);
        if (jme->fpgaver)
                gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
        else
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
}

static inline void
jme_clear_pm(struct jme_adapter *jme)
{
        jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
        u32 val;
        int i;

        val = jread32(jme, JME_SMBCSR);

        if (val & SMBCSR_EEPROMD) {
                val |= SMBCSR_CNACK;
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);
                mdelay(12);

                for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
                        mdelay(1);
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                                break;
                }

                if (i == 0) {
                        pr_err("eeprom reload timeout\n");
                        return -EIO;
                }
        }

        return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[6];
        u32 val;

        spin_lock_bh(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
        memcpy(netdev->dev_addr, macaddr, 6);
        spin_unlock_bh(&jme->macaddr_lock);
}

static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
        switch (p) {
        case PCC_OFF:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P1:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P2:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        case PCC_P3:
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                break;
        default:
                break;
        }
        wmb();

        if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
                netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;

        jwrite32(jme, JME_PCCTX,
                        ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                        ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );

        /*
         * Enable Interrupts
         */
        jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
        /*
         * Disable Interrupts
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
        u32 phylink, bmsr;

        phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
        bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
        if (bmsr & BMSR_ANCOMP)
                phylink |= PHY_LINK_AUTONEG_COMPLETE;

        return phylink;
}

static inline void
jme_set_phyfifo_5level(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifo_8level(struct jme_adapter *jme)
{
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

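/*
 * Editor's note: jme_check_link() resolves speed/duplex either from the
 * BMCR (forced mode, when auto-negotiation did not complete) or by
 * polling PHY_LINK_SPEEDDPU_RESOLVED (auto-negotiated mode), then
 * programs GHC/TXMCS/TXTRHD to match.  With testonly set it only reports
 * whether the link state is unchanged (returns 1) and touches no
 * hardware state.
 */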
static int
jme_check_link(struct net_device *netdev, int testonly)
{
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[64];
        int rc = 0;

        linkmsg[0] = '\0';

        if (jme->fpgaver)
                phylink = jme_linkstat_from_phy(jme);
        else
                phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If AN was not enabled, speed/duplex info
                         * must be obtained from the PHY through SMI.
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,
                                                jme->mii_if.phy_id,
                                                MII_BMCR);

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                        (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :
                                        PHY_LINK_SPEED_10M;

                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;

                        strcat(linkmsg, "Forced: ");
                } else {
                        /*
                         * Keep polling until speed/duplex resolution
                         * completes
                         */
                        while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                --cnt) {

                                udelay(1);

                                if (jme->fpgaver)
                                        phylink = jme_linkstat_from_phy(jme);
                                else
                                        phylink = jread32(jme, JME_PHY_LINK);
                        }
                        if (!cnt)
                                pr_err("Timed out waiting for speed/duplex resolution\n");

                        strcat(linkmsg, "ANed: ");
                }

                if (jme->phylink == phylink) {
                        rc = 1;
                        goto out;
                }
                if (testonly)
                        goto out;

                jme->phylink = phylink;

                /*
                 * The speed/duplex setting in jme->reg_ghc has already
                 * been cleared by jme_reset_mac_processor()
                 */
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
                        jme->reg_ghc |= GHC_SPEED_10M;
                        strcat(linkmsg, "10 Mbps, ");
                        break;
                case PHY_LINK_SPEED_100M:
                        jme->reg_ghc |= GHC_SPEED_100M;
                        strcat(linkmsg, "100 Mbps, ");
                        break;
                case PHY_LINK_SPEED_1000M:
                        jme->reg_ghc |= GHC_SPEED_1000M;
                        strcat(linkmsg, "1000 Mbps, ");
                        break;
                default:
                        break;
                }

                if (phylink & PHY_LINK_DUPLEX) {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
                        jme->reg_ghc |= GHC_DPX;
                } else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
                }

                jwrite32(jme, JME_GHC, jme->reg_ghc);

                if (is_buggy250(jme->pdev->device, jme->chiprev)) {
                        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                                             GPREG1_RSSPATCH);
                        if (!(phylink & PHY_LINK_DUPLEX))
                                jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
                        switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                jme_set_phyfifo_8level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_100M:
                                jme_set_phyfifo_5level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                                break;
                        case PHY_LINK_SPEED_1000M:
                                jme_set_phyfifo_8level(jme);
                                break;
                        default:
                                break;
                        }
                }
                jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                                        "Full-Duplex, " :
                                        "Half-Duplex, ");
                strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
                                        "MDI-X" :
                                        "MDI");
                netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
                netif_carrier_on(netdev);
        } else {
                if (testonly)
                        goto out;

                netif_info(jme, link, jme->dev, "Link is down\n");
                jme->phylink = 0;
                netif_carrier_off(netdev);
        }

out:
        return rc;
}

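/*
 * Editor's note: the descriptor rings are over-allocated so the base
 * address can be rounded up to RING_DESC_ALIGN (16 bytes).  A minimal
 * sketch of the idea, assuming TX_RING_ALLOC_SIZE() adds the alignment
 * slack on top of the descriptor array:
 *
 *        alloc:  |<- slack ->|<----- ring_size descriptors ----->|
 *        desc  = ALIGN(alloc, 16);        first 16-byte boundary
 *        dma   = ALIGN(dmaalloc, 16);     same offset in bus space
 *
 * The CPU and DMA addresses are aligned by the same amount, so desc[i]
 * and the device-visible ring stay in step.
 */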
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                   &(txring->dmaalloc),
                                   GFP_ATOMIC);

        if (!txring->alloc)
                goto err_set_null;

        /*
         * 16-byte alignment
         */
        txring->desc            = (void *)ALIGN((unsigned long)(txring->alloc),
                                                RING_DESC_ALIGN);
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);

        txring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
                                        jme->tx_ring_size, GFP_ATOMIC);
        if (unlikely(!(txring->bufinf)))
                goto err_free_txring;

        /*
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
        memset(txring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->tx_ring_size);

        return 0;

err_free_txring:
        dma_free_coherent(&(jme->pdev->dev),
                          TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                          txring->alloc,
                          txring->dmaalloc);

err_set_null:
        txring->desc = NULL;
        txring->dmaalloc = 0;
        txring->dma = 0;
        txring->bufinf = NULL;

        return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi;

        if (txring->alloc) {
                if (txring->bufinf) {
                        for (i = 0 ; i < jme->tx_ring_size ; ++i) {
                                txbi = txring->bufinf + i;
                                if (txbi->skb) {
                                        dev_kfree_skb(txbi->skb);
                                        txbi->skb = NULL;
                                }
                                txbi->mapping           = 0;
                                txbi->len               = 0;
                                txbi->nr_desc           = 0;
                                txbi->start_xmit        = 0;
                        }
                        kfree(txring->bufinf);
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                                  txring->alloc,
                                  txring->dmaalloc);

                txring->alloc           = NULL;
                txring->desc            = NULL;
                txring->dmaalloc        = 0;
                txring->dma             = 0;
                txring->bufinf          = NULL;
        }
        txring->next_to_use     = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
        wmb();

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

        /*
         * Enable TX Engine
         */
        wmb();
        jwrite32f(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);

        /*
         * Start clock for TX MAC Processor
         */
        jme_mac_txclk_on(jme);
}

static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
        /*
         * Restart TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
                                TXCS_ENABLE);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
        int i;
        u32 val;

        /*
         * Disable TX Engine
         */
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
        wmb();

        val = jread32(jme, JME_TXCS);
        for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_TXCS);
                rmb();
        }

        if (!i)
                pr_err("Disable TX engine timeout\n");

        /*
         * Stop clock for TX MAC Processor
         */
        jme_mac_txclk_off(jme);
}

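/*
 * Editor's note: jme_set_clean_rxdesc() rewrites a descriptor and only
 * then, after a wmb(), sets RXFLAG_OWN.  The barrier keeps the buffer
 * address and length visible to the NIC before ownership is handed
 * over; without it the device could DMA into a half-initialized
 * descriptor.
 */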
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        register struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxdesc += i;
        rxbi += i;

        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl  = cpu_to_le32(
                                        (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if (jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        wmb();
        rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf + i;
        struct sk_buff *skb;
        dma_addr_t mapping;

        skb = netdev_alloc_skb(jme->dev,
                jme->dev->mtu + RX_EXTRA_LEN);
        if (unlikely(!skb))
                return -ENOMEM;

        mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
                               offset_in_page(skb->data), skb_tailroom(skb),
                               PCI_DMA_FROMDEVICE);
        if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        if (likely(rxbi->mapping))
                pci_unmap_page(jme->pdev, rxbi->mapping,
                               rxbi->len, PCI_DMA_FROMDEVICE);

        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = mapping;
        return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;
        rxbi += i;

        if (rxbi->skb) {
                pci_unmap_page(jme->pdev,
                                 rxbi->mapping,
                                 rxbi->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
                rxbi->len = 0;
        }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        if (rxring->alloc) {
                if (rxring->bufinf) {
                        for (i = 0 ; i < jme->rx_ring_size ; ++i)
                                jme_free_rx_buf(jme, i);
                        kfree(rxring->bufinf);
                }

                dma_free_coherent(&(jme->pdev->dev),
                                  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                  rxring->alloc,
                                  rxring->dmaalloc);
                rxring->alloc    = NULL;
                rxring->desc     = NULL;
                rxring->dmaalloc = 0;
                rxring->dma      = 0;
                rxring->bufinf   = NULL;
        }
        rxring->next_to_use   = 0;
        atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
        int i;
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                                   &(rxring->dmaalloc),
                                   GFP_ATOMIC);
        if (!rxring->alloc)
                goto err_set_null;

        /*
         * 16-byte alignment
         */
        rxring->desc            = (void *)ALIGN((unsigned long)(rxring->alloc),
                                                RING_DESC_ALIGN);
        rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use     = 0;
        atomic_set(&rxring->next_to_clean, 0);

        rxring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
                                        jme->rx_ring_size, GFP_ATOMIC);
        if (unlikely(!(rxring->bufinf)))
                goto err_free_rxring;

        /*
         * Initialize Receive Descriptors
         */
        memset(rxring->bufinf, 0,
                sizeof(struct jme_buffer_info) * jme->rx_ring_size);
        for (i = 0 ; i < jme->rx_ring_size ; ++i) {
                if (unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
                        return -ENOMEM;
                }

                jme_set_clean_rxdesc(jme, i);
        }

        return 0;

err_free_rxring:
        dma_free_coherent(&(jme->pdev->dev),
                          RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                          rxring->alloc,
                          rxring->dmaalloc);
err_set_null:
        rxring->desc = NULL;
        rxring->dmaalloc = 0;
        rxring->dma = 0;
        rxring->bufinf = NULL;

        return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
        /*
         * Select Queue 0
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0);
        wmb();

        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

        /*
         * Setup Unicast Filter
         */
        jme_set_unicastaddr(jme->dev);
        jme_set_multi(jme->dev);

        /*
         * Enable RX Engine
         */
        wmb();
        jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);

        /*
         * Start clock for RX MAC Processor
         */
        jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
        /*
         * Start RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
        int i;
        u32 val;

        /*
         * Disable RX Engine
         */
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);
        wmb();

        val = jread32(jme, JME_RXCS);
        for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                mdelay(1);
                val = jread32(jme, JME_RXCS);
                rmb();
        }

        if (!i)
                pr_err("Disable RX engine timeout\n");

        /*
         * Stop clock for RX MAC Processor
         */
        jme_mac_rxclk_off(jme);
}

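/*
 * Editor's note: jme_udpsum() digs the UDP checksum field out of an
 * IPv4 frame.  In IPv4 a UDP checksum of zero means "not computed by
 * the sender", so jme_rxsum_ok() below only treats a missing hardware
 * UDP checksum as an error when this field is non-zero.
 */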
static u16
jme_udpsum(struct sk_buff *skb)
{
        u16 csum = 0xFFFFu;

        if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
                return csum;
        if (skb->protocol != htons(ETH_P_IP))
                return csum;
        skb_set_network_header(skb, ETH_HLEN);
        if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
            (skb->len < (ETH_HLEN +
                        (ip_hdr(skb)->ihl << 2) +
                        sizeof(struct udphdr)))) {
                skb_reset_network_header(skb);
                return csum;
        }
        skb_set_transport_header(skb,
                        ETH_HLEN + (ip_hdr(skb)->ihl << 2));
        csum = udp_hdr(skb)->check;
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);

        return csum;
}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
        if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
                return false;

        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
                        == RXWBFLAG_TCPON)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
                return false;
        }

        if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
                        == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
                if (flags & RXWBFLAG_IPV4)
                        netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
                return false;
        }

        if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
                        == RXWBFLAG_IPV4)) {
                netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
                return false;
        }

        return true;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;
        struct sk_buff *skb;
        int framesize;

        rxdesc += idx;
        rxbi += idx;

        skb = rxbi->skb;
        pci_dma_sync_single_for_cpu(jme->pdev,
                                        rxbi->mapping,
                                        rxbi->len,
                                        PCI_DMA_FROMDEVICE);

        if (unlikely(jme_make_new_rx_buf(jme, idx))) {
                pci_dma_sync_single_for_device(jme->pdev,
                                                rxbi->mapping,
                                                rxbi->len,
                                                PCI_DMA_FROMDEVICE);

                ++(NET_STAT(jme).rx_dropped);
        } else {
                framesize = le16_to_cpu(rxdesc->descwb.framesize)
                                - RX_PREPAD_SIZE;

                skb_reserve(skb, RX_PREPAD_SIZE);
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);

                if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
                        u16 vid = le16_to_cpu(rxdesc->descwb.vlan);

                        __vlan_hwaccel_put_tag(skb, vid);
                        NET_STAT(jme).rx_bytes += 4;
                }
                jme->jme_rx(skb);

                if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
                    cpu_to_le16(RXWBFLAG_DEST_MUL))
                        ++(NET_STAT(jme).multicast);

                NET_STAT(jme).rx_bytes += framesize;
                ++(NET_STAT(jme).rx_packets);
        }

        jme_set_clean_rxdesc(jme, idx);
}

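/*
 * Editor's note: jme_process_receive() walks the RX ring from
 * next_to_clean, stopping at the first descriptor the NIC still owns
 * (RXWBFLAG_OWN set or write-back not complete).  Error frames and
 * multi-descriptor frames are recycled in place with
 * jme_set_clean_rxdesc(); good frames are handed to the stack via
 * jme_alloc_and_feed_skb().  It returns the unused part of the budget.
 */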
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct rxdesc *rxdesc = rxring->desc;
        int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

        if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out_inc;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                goto out_inc;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                goto out_inc;

        i = atomic_read(&rxring->next_to_clean);
        while (limit > 0) {
                rxdesc = rxring->desc;
                rxdesc += i;

                if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
                !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
                        goto out;
                --limit;

                rmb();
                desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

                if (unlikely(desccnt > 1 ||
                rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

                        if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
                        else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
                                ++(NET_STAT(jme).rx_fifo_errors);
                        else
                                ++(NET_STAT(jme).rx_errors);

                        if (desccnt > 1)
                                limit -= desccnt - 1;

                        for (j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);
                                j = (j + 1) & (mask);
                        }

                } else {
                        jme_alloc_and_feed_skb(jme, i);
                }

                i = (i + desccnt) & (mask);
        }

out:
        atomic_set(&rxring->next_to_clean, i);

out_inc:
        atomic_inc(&jme->rx_cleaning);

        return limit > 0 ? limit : 0;
}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
        if (likely(atmp == dpi->cur)) {
                dpi->cnt = 0;
                return;
        }

        if (dpi->attempt == atmp) {
                ++(dpi->cnt);
        } else {
                dpi->attempt = atmp;
                dpi->cnt = 0;
        }
}

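/*
 * Editor's note: the driver adapts its RX interrupt coalescing (PCC =
 * Packet Completion Coalescing) from observed traffic.  Roughly: heavy
 * byte traffic selects P3, heavy packet or interrupt rates select P2,
 * and anything lighter selects P1.  A new level is only committed after
 * it has been attempted more than five consecutive times (dpi->cnt > 5),
 * which damps oscillation between levels.
 */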
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
        register struct dynpcc_info *dpi = &(jme->dpi);

        if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
                 dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
                        tasklet_schedule(&jme->rxclean_task);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
        }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
        struct dynpcc_info *dpi = &(jme->dpi);
        dpi->last_bytes         = NET_STAT(jme).rx_bytes;
        dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->intr_cnt           = 0;
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_shutdown_nic(struct jme_adapter *jme)
{
        u32 phylink;

        phylink = jme_linkstat_from_phy(jme);

        if (!(phylink & PHY_LINK_UP)) {
                /*
                 * Disable all interrupts before arming the timer
                 */
                jme_stop_irq(jme);
                jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
        }
}

static void
jme_pcc_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct net_device *netdev = jme->dev;

        if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
                jme_shutdown_nic(jme);
                return;
        }

        if (unlikely(!netif_carrier_ok(netdev) ||
                (atomic_read(&jme->link_changing) != 1)
        )) {
                jme_stop_pcc_timer(jme);
                return;
        }

        if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
                jme_dynamic_pcc(jme);

        jme_start_pcc_timer(jme);
}

static inline void
jme_polling_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
        jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
        u32 apmc;
        apmc = jread32(jme, JME_APMC);
        return apmc & JME_APMC_PSEUDO_HP_EN;
}

static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
        u32 apmc;

        apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
        apmc &= ~JME_APMC_EPIEN_CTRL;
        if (!no_extplug) {
                jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
                wmb();
        }
        jwrite32f(jme, JME_APMC, apmc);

        jwrite32f(jme, JME_TIMER2, 0);
        set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
        jwrite32(jme, JME_TMCSR,
                TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
        u32 apmc;

        jwrite32f(jme, JME_TMCSR, 0);
        jwrite32f(jme, JME_TIMER2, 0);
        clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

        apmc = jread32(jme, JME_APMC);
        apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
        jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
        wmb();
        jwrite32f(jme, JME_APMC, apmc);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct net_device *netdev = jme->dev;
        int rc;

        while (!atomic_dec_and_test(&jme->link_changing)) {
                atomic_inc(&jme->link_changing);
                netif_info(jme, intr, jme->dev, "Failed to get link change lock\n");
                while (atomic_read(&jme->link_changing) != 1)
                        netif_info(jme, intr, jme->dev, "Waiting for link change lock\n");
        }

        if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;

        jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);
        if (jme_pseudo_hotplug_enabled(jme))
                jme_stop_shutdown_timer(jme);

        jme_stop_pcc_timer(jme);
        tasklet_disable(&jme->txclean_task);
        tasklet_disable(&jme->rxclean_task);
        tasklet_disable(&jme->rxempty_task);

        if (netif_carrier_ok(netdev)) {
                jme_disable_rx_engine(jme);
                jme_disable_tx_engine(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);

                if (test_bit(JME_FLAG_POLL, &jme->flags))
                        jme_polling_mode(jme);

                netif_carrier_off(netdev);
        }

        jme_check_link(netdev, 0);
        if (netif_carrier_ok(netdev)) {
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        pr_err("Failed to allocate RX resources, device stopped!\n");
                        goto out_enable_tasklet;
                }

                rc = jme_setup_tx_resources(jme);
                if (rc) {
                        pr_err("Failed to allocate TX resources, device stopped!\n");
                        goto err_out_free_rx_resources;
                }

                jme_enable_rx_engine(jme);
                jme_enable_tx_engine(jme);

                netif_start_queue(netdev);

                if (test_bit(JME_FLAG_POLL, &jme->flags))
                        jme_interrupt_mode(jme);

                jme_start_pcc_timer(jme);
        } else if (jme_pseudo_hotplug_enabled(jme)) {
                jme_start_shutdown_timer(jme);
        }

        goto out_enable_tasklet;

err_out_free_rx_resources:
        jme_free_rx_resources(jme);
out_enable_tasklet:
        tasklet_enable(&jme->txclean_task);
        tasklet_hi_enable(&jme->rxclean_task);
        tasklet_hi_enable(&jme->rxempty_task);
out:
        atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
        ++(dpi->intr_cnt);
}

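/*
 * Editor's note: jme_poll() is the NAPI handler (wrapped by the
 * JME_NAPI_* compatibility macros from jme.h).  It cleans up to
 * "budget" frames, restarts the RX engine for every queue-empty event
 * recorded by the interrupt handler, and if the budget was not
 * exhausted completes NAPI and switches back to interrupt mode.
 */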
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
        struct jme_adapter *jme = jme_napi_priv(holder);
        int rest;

        rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);

        if (rest) {
                JME_RX_COMPLETE(netdev, holder);
                jme_interrupt_mode(jme);
        }

        JME_NAPI_WEIGHT_SET(budget, rest);
        return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                return;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                return;

        netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

        jme_rx_clean_tasklet(arg);

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
                ++(NET_STAT(jme).rx_dropped);
                jme_restart_rx_engine(jme);
        }
        atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
        struct jme_ring *txring = &(jme->txring[0]);

        smp_wmb();
        if (unlikely(netif_queue_stopped(jme->dev) &&
        atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
                netif_info(jme, tx_done, jme->dev, "TX Queue Woken\n");
                netif_wake_queue(jme->dev);
        }
}

static void
jme_tx_clean_tasklet(unsigned long arg)
{
        struct jme_adapter *jme = (struct jme_adapter *)arg;
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err, mask;

        tx_dbg(jme, "Into txclean\n");

        if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;

        if (unlikely(!netif_carrier_ok(jme->dev)))
                goto out;

        max = jme->tx_ring_size - atomic_read(&txring->nr_free);
        mask = jme->tx_ring_mask;

        for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

                ctxbi = txbi + i;

                if (likely(ctxbi->skb &&
                !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

                        tx_dbg(jme, "txclean: %d+%d@%lu\n",
                               i, ctxbi->nr_desc, jiffies);

                        err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

                        for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
                                ttxbi = txbi + ((i + j) & (mask));
                                txdesc[(i + j) & (mask)].dw[0] = 0;

                                pci_unmap_page(jme->pdev,
                                                 ttxbi->mapping,
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);

                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }

                        dev_kfree_skb(ctxbi->skb);

                        cnt += ctxbi->nr_desc;

                        if (unlikely(err)) {
                                ++(NET_STAT(jme).tx_carrier_errors);
                        } else {
                                ++(NET_STAT(jme).tx_packets);
                                NET_STAT(jme).tx_bytes += ctxbi->len;
                        }

                        ctxbi->skb = NULL;
                        ctxbi->len = 0;
                        ctxbi->start_xmit = 0;

                } else {
                        break;
                }

                i = (i + ctxbi->nr_desc) & mask;

                ctxbi->nr_desc = 0;
        }

        tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
        atomic_set(&txring->next_to_clean, i);
        atomic_add(cnt, &txring->nr_free);

        jme_wake_queue_if_stopped(jme);

out:
        atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
        /*
         * Disable interrupts
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);

        if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                /*
                 * A link change event is critical;
                 * all other events are ignored
                 */
                jwrite32(jme, JME_IEVE, intrstat);
                tasklet_schedule(&jme->linkch_task);
                goto out_reenable;
        }

        if (intrstat & INTR_TMINTR) {
                jwrite32(jme, JME_IEVE, INTR_TMINTR);
                tasklet_schedule(&jme->pcc_task);
        }

        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
                jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
                tasklet_schedule(&jme->txclean_task);
        }

        if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
                jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
                                                     INTR_PCCRX0 |
                                                     INTR_RX0EMP)) |
                                        INTR_RX0);
        }

        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
                if (intrstat & INTR_RX0EMP)
                        atomic_inc(&jme->rx_empty);

                if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
                        if (likely(JME_RX_SCHEDULE_PREP(jme))) {
                                jme_polling_mode(jme);
                                JME_RX_SCHEDULE(jme);
                        }
                }
        } else {
                if (intrstat & INTR_RX0EMP) {
                        atomic_inc(&jme->rx_empty);
                        tasklet_hi_schedule(&jme->rxempty_task);
                } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
                        tasklet_hi_schedule(&jme->rxclean_task);
                }
        }

out_reenable:
        /*
         * Re-enable interrupts
         */
        jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

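/*
 * Editor's note: in the shared (INTx) interrupt path below, two sanity
 * checks guard against foreign interrupts: a status with no INTR_ENABLE
 * bits means the interrupt was not ours, and a status of all ones
 * usually means the device has been removed (reads from a missing PCI
 * device return 0xFFFFFFFF).
 */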
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        /*
         * Check if it's really an interrupt for us
         */
        if (unlikely((intrstat & INTR_ENABLE) == 0))
                return IRQ_NONE;

        /*
         * Check if the device still exists
         */
        if (unlikely(intrstat == ~((typeof(intrstat))0)))
                return IRQ_NONE;

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 intrstat;

        intrstat = jread32(jme, JME_IEVE);

        jme_intr_msi(jme, intrstat);

        return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
        jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
        u32 bmcr;

        spin_lock_bh(&jme->phy_lock);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
        spin_unlock_bh(&jme->phy_lock);
}

1646static int
1647jme_request_irq(struct jme_adapter *jme)
1648{
1649        int rc;
1650        struct net_device *netdev = jme->dev;
1651        irq_handler_t handler = jme_intr;
1652        int irq_flags = IRQF_SHARED;
1653
1654        if (!pci_enable_msi(jme->pdev)) {
1655                set_bit(JME_FLAG_MSI, &jme->flags);
1656                handler = jme_msi;
1657                irq_flags = 0;
1658        }
1659
1660        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1661                          netdev);
1662        if (rc) {
1663                netdev_err(netdev,
1664                           "Unable to request %s interrupt (return: %d)\n",
1665                           test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1666                           rc);
1667
1668                if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1669                        pci_disable_msi(jme->pdev);
1670                        clear_bit(JME_FLAG_MSI, &jme->flags);
1671                }
1672        } else {
1673                netdev->irq = jme->pdev->irq;
1674        }
1675
1676        return rc;
1677}
1678
1679static void
1680jme_free_irq(struct jme_adapter *jme)
1681{
1682        free_irq(jme->pdev->irq, jme->dev);
1683        if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1684                pci_disable_msi(jme->pdev);
1685                clear_bit(JME_FLAG_MSI, &jme->flags);
1686                jme->dev->irq = jme->pdev->irq;
1687        }
1688}
1689
1690static inline void
1691jme_new_phy_on(struct jme_adapter *jme)
1692{
1693        u32 reg;
1694
1695        reg = jread32(jme, JME_PHY_PWR);
1696        reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1697                 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1698        jwrite32(jme, JME_PHY_PWR, reg);
1699
1700        pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1701        reg &= ~PE1_GPREG0_PBG;
1702        reg |= PE1_GPREG0_ENBG;
1703        pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1704}
1705
1706static inline void
1707jme_new_phy_off(struct jme_adapter *jme)
1708{
1709        u32 reg;
1710
1711        reg = jread32(jme, JME_PHY_PWR);
1712        reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1713               PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1714        jwrite32(jme, JME_PHY_PWR, reg);
1715
1716        pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1717        reg &= ~PE1_GPREG0_PBG;
1718        reg |= PE1_GPREG0_PDD3COLD;
1719        pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1720}
1721
1722static inline void
1723jme_phy_on(struct jme_adapter *jme)
1724{
1725        u32 bmcr;
1726
1727        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1728        bmcr &= ~BMCR_PDOWN;
1729        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1730
1731        if (new_phy_power_ctrl(jme->chip_main_rev))
1732                jme_new_phy_on(jme);
1733}
1734
1735static inline void
1736jme_phy_off(struct jme_adapter *jme)
1737{
1738        u32 bmcr;
1739
1740        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1741        bmcr |= BMCR_PDOWN;
1742        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1743
1744        if (new_phy_power_ctrl(jme->chip_main_rev))
1745                jme_new_phy_off(jme);
1746}
1747
1748static int
1749jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1750{
1751        u32 phy_addr;
1752
1753        phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1754        jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1755                        phy_addr);
1756        return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1757                        JM_PHY_SPEC_DATA_REG);
1758}
1759
1760static void
1761jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1762{
1763        u32 phy_addr;
1764
1765        phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1766        jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1767                        phy_data);
1768        jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1769                        phy_addr);
1770}
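
/*
 * Extended PHY registers are accessed indirectly through the two
 * helpers above: a write stages the value in JM_PHY_SPEC_DATA_REG and
 * commits it by writing the register number plus the WRITE command to
 * JM_PHY_SPEC_ADDR_REG; a read posts the READ command first and then
 * fetches JM_PHY_SPEC_DATA_REG.
 */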
1771
1772static int
1773jme_phy_calibration(struct jme_adapter *jme)
1774{
1775        u32 ctrl1000, phy_data;
1776
1777        jme_phy_off(jme);
1778        jme_phy_on(jme);
1779        /*  Enable PHY test mode 1 */
1780        ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1781        ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1782        ctrl1000 |= PHY_GAD_TEST_MODE_1;
1783        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1784
1785        phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1786        phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1787        phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1788                        JM_PHY_EXT_COMM_2_CALI_ENABLE;
1789        jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1790        msleep(20);
1791        phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1792        phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1793                        JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1794                        JM_PHY_EXT_COMM_2_CALI_LATCH);
1795        jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1796
1797        /*  Disable PHY test mode */
1798        ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1799        ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1800        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1801        return 0;
1802}
1803
1804static int
1805jme_phy_setEA(struct jme_adapter *jme)
1806{
1807        u32 phy_comm0 = 0, phy_comm1 = 0;
1808        u8 nic_ctrl;
1809
1810        pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1811        if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1812                return 0;
1813
1814        switch (jme->pdev->device) {
1815        case PCI_DEVICE_ID_JMICRON_JMC250:
1816                if (((jme->chip_main_rev == 5) &&
1817                        ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1818                        (jme->chip_sub_rev == 3))) ||
1819                        (jme->chip_main_rev >= 6)) {
1820                        phy_comm0 = 0x008A;
1821                        phy_comm1 = 0x4109;
1822                }
1823                if ((jme->chip_main_rev == 3) &&
1824                        ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1825                        phy_comm0 = 0xE088;
1826                break;
1827        case PCI_DEVICE_ID_JMICRON_JMC260:
1828                if (((jme->chip_main_rev == 5) &&
1829                        ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1830                        (jme->chip_sub_rev == 3))) ||
1831                        (jme->chip_main_rev >= 6)) {
1832                        phy_comm0 = 0x008A;
1833                        phy_comm1 = 0x4109;
1834                }
1835                if ((jme->chip_main_rev == 3) &&
1836                        ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1837                        phy_comm0 = 0xE088;
1838                if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1839                        phy_comm0 = 0x608A;
1840                if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1841                        phy_comm0 = 0x408A;
1842                break;
1843        default:
1844                return -ENODEV;
1845        }
1846        if (phy_comm0)
1847                jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1848        if (phy_comm1)
1849                jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1850
1851        return 0;
1852}
1853
1854static int
1855jme_open(struct net_device *netdev)
1856{
1857        struct jme_adapter *jme = netdev_priv(netdev);
1858        int rc;
1859
1860        jme_clear_pm(jme);
1861        JME_NAPI_ENABLE(jme);
1862
1863        tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
1864                     (unsigned long) jme);
1865        tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
1866                     (unsigned long) jme);
1867        tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
1868                     (unsigned long) jme);
1869        tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
1870                     (unsigned long) jme);
1871
1872        rc = jme_request_irq(jme);
1873        if (rc)
1874                goto err_out;
1875
1876        jme_start_irq(jme);
1877
1878        jme_phy_on(jme);
1879        if (test_bit(JME_FLAG_SSET, &jme->flags))
1880                jme_set_settings(netdev, &jme->old_ecmd);
1881        else
1882                jme_reset_phy_processor(jme);
1883        jme_phy_calibration(jme);
1884        jme_phy_setEA(jme);
1885        jme_reset_link(jme);
1886
1887        return 0;
1888
1889err_out:
1890        netif_stop_queue(netdev);
1891        netif_carrier_off(netdev);
1892        return rc;
1893}
1894
1895static void
1896jme_set_100m_half(struct jme_adapter *jme)
1897{
1898        u32 bmcr, tmp;
1899
1900        jme_phy_on(jme);
1901        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1902        tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1903                       BMCR_SPEED1000 | BMCR_FULLDPLX);
1904        tmp |= BMCR_SPEED100;
1905
1906        if (bmcr != tmp)
1907                jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1908
1909        if (jme->fpgaver)
1910                jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1911        else
1912                jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1913}
1914
1915#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1916static void
1917jme_wait_link(struct jme_adapter *jme)
1918{
1919        u32 phylink, to = JME_WAIT_LINK_TIME;
1920
1921        mdelay(1000);
1922        phylink = jme_linkstat_from_phy(jme);
1923        while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1924                mdelay(10);
1925                phylink = jme_linkstat_from_phy(jme);
1926        }
1927}
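
/*
 * Worst case jme_wait_link() busy-waits about three seconds: a fixed
 * 1000ms settling delay plus up to JME_WAIT_LINK_TIME (2000ms) of
 * 10ms polls.
 */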
1928
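/*
 * Called before shutdown/suspend: if any wake-up event is armed, drop
 * the PHY to forced 100M half duplex (waiting for the link partner to
 * renegotiate when link-change wake is enabled) so it can keep
 * monitoring the wire at lower power; otherwise power the PHY off
 * entirely.
 */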
1929static void
1930jme_powersave_phy(struct jme_adapter *jme)
1931{
1932        if (jme->reg_pmcs) {
1933                jme_set_100m_half(jme);
1934                if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1935                        jme_wait_link(jme);
1936                jme_clear_pm(jme);
1937        } else {
1938                jme_phy_off(jme);
1939        }
1940}
1941
1942static int
1943jme_close(struct net_device *netdev)
1944{
1945        struct jme_adapter *jme = netdev_priv(netdev);
1946
1947        netif_stop_queue(netdev);
1948        netif_carrier_off(netdev);
1949
1950        jme_stop_irq(jme);
1951        jme_free_irq(jme);
1952
1953        JME_NAPI_DISABLE(jme);
1954
1955        tasklet_kill(&jme->linkch_task);
1956        tasklet_kill(&jme->txclean_task);
1957        tasklet_kill(&jme->rxclean_task);
1958        tasklet_kill(&jme->rxempty_task);
1959
1960        jme_disable_rx_engine(jme);
1961        jme_disable_tx_engine(jme);
1962        jme_reset_mac_processor(jme);
1963        jme_free_rx_resources(jme);
1964        jme_free_tx_resources(jme);
1965        jme->phylink = 0;
1966        jme_phy_off(jme);
1967
1968        return 0;
1969}
1970
1971static int
1972jme_alloc_txdesc(struct jme_adapter *jme,
1973                        struct sk_buff *skb)
1974{
1975        struct jme_ring *txring = &(jme->txring[0]);
1976        int idx, nr_alloc, mask = jme->tx_ring_mask;
1977
1978        idx = txring->next_to_use;
1979        nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1980
1981        if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1982                return -1;
1983
1984        atomic_sub(nr_alloc, &txring->nr_free);
1985
1986        txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1987
1988        return idx;
1989}
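
/*
 * jme_alloc_txdesc() reserves nr_frags + 2 ring slots per skb: one
 * leading descriptor for flags/TSO/VLAN metadata, one for the linear
 * data and one per page fragment.  For example, with the default
 * tx_ring_mask of 0x3FF, a 3-fragment skb starting at next_to_use 1022
 * occupies slots 1022, 1023, 0, 1 and 2 after wrapping.
 */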
1990
1991static void
1992jme_fill_tx_map(struct pci_dev *pdev,
1993                struct txdesc *txdesc,
1994                struct jme_buffer_info *txbi,
1995                struct page *page,
1996                u32 page_offset,
1997                u32 len,
1998                bool hidma)
1999{
2000        dma_addr_t dmaaddr;
2001
2002        dmaaddr = pci_map_page(pdev,
2003                                page,
2004                                page_offset,
2005                                len,
2006                                PCI_DMA_TODEVICE);
2007
2008        pci_dma_sync_single_for_device(pdev,
2009                                       dmaaddr,
2010                                       len,
2011                                       PCI_DMA_TODEVICE);
2012
2013        txdesc->dw[0] = 0;
2014        txdesc->dw[1] = 0;
2015        txdesc->desc2.flags     = TXFLAG_OWN;
2016        txdesc->desc2.flags     |= (hidma) ? TXFLAG_64BIT : 0;
2017        txdesc->desc2.datalen   = cpu_to_le16(len);
2018        txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
2019        txdesc->desc2.bufaddrl  = cpu_to_le32(
2020                                        (__u64)dmaaddr & 0xFFFFFFFFUL);
2021
2022        txbi->mapping = dmaaddr;
2023        txbi->len = len;
2024}
2025
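/*
 * Map one skb onto the slots reserved by jme_alloc_txdesc(): the page
 * fragments go into idx + 2 onward and the linear part into idx + 1,
 * leaving slot idx itself for the leading descriptor that
 * jme_fill_tx_desc() completes last.
 */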
2026static void
2027jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2028{
2029        struct jme_ring *txring = &(jme->txring[0]);
2030        struct txdesc *txdesc = txring->desc, *ctxdesc;
2031        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2032        bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
2033        int i, nr_frags = skb_shinfo(skb)->nr_frags;
2034        int mask = jme->tx_ring_mask;
2035        const struct skb_frag_struct *frag;
2036        u32 len;
2037
2038        for (i = 0 ; i < nr_frags ; ++i) {
2039                frag = &skb_shinfo(skb)->frags[i];
2040                ctxdesc = txdesc + ((idx + i + 2) & (mask));
2041                ctxbi = txbi + ((idx + i + 2) & (mask));
2042
2043                jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2044                                skb_frag_page(frag),
2045                                frag->page_offset, skb_frag_size(frag), hidma);
2046        }
2047
2048        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2049        ctxdesc = txdesc + ((idx + 1) & (mask));
2050        ctxbi = txbi + ((idx + 1) & (mask));
2051        jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2052                        offset_in_page(skb->data), len, hidma);
2054}
2055
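/*
 * TSO rewrites the IP/TCP headers in place (see jme_tx_tso()), so a GSO
 * skb whose header area is shared with a clone must be given a private
 * copy first; if that allocation fails the packet is dropped rather
 * than risk corrupting the clone.
 */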
2056static int
2057jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2058{
2059        if (unlikely(skb_shinfo(skb)->gso_size &&
2060                        skb_header_cloned(skb) &&
2061                        pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2062                dev_kfree_skb(skb);
2063                return -1;
2064        }
2065
2066        return 0;
2067}
2068
2069static int
2070jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2071{
2072        *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
2073        if (*mss) {
2074                *flags |= TXFLAG_LSEN;
2075
2076                if (skb->protocol == htons(ETH_P_IP)) {
2077                        struct iphdr *iph = ip_hdr(skb);
2078
2079                        iph->check = 0;
2080                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2081                                                                iph->daddr, 0,
2082                                                                IPPROTO_TCP,
2083                                                                0);
2084                } else {
2085                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
2086
2087                        tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
2088                                                                &ip6h->daddr, 0,
2089                                                                IPPROTO_TCP,
2090                                                                0);
2091                }
2092
2093                return 0;
2094        }
2095
2096        return 1;
2097}
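
/*
 * Note that jme_tx_tso() seeds the TCP checksum with the pseudo-header
 * sum computed over a zero length, e.g. for IPv4
 *
 *	th->check = ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
 *
 * so the hardware can fold each segment's length and payload into the
 * final checksum on its own.
 */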
2098
2099static void
2100jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2101{
2102        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2103                u8 ip_proto;
2104
2105                switch (skb->protocol) {
2106                case htons(ETH_P_IP):
2107                        ip_proto = ip_hdr(skb)->protocol;
2108                        break;
2109                case htons(ETH_P_IPV6):
2110                        ip_proto = ipv6_hdr(skb)->nexthdr;
2111                        break;
2112                default:
2113                        ip_proto = 0;
2114                        break;
2115                }
2116
2117                switch (ip_proto) {
2118                case IPPROTO_TCP:
2119                        *flags |= TXFLAG_TCPCS;
2120                        break;
2121                case IPPROTO_UDP:
2122                        *flags |= TXFLAG_UDPCS;
2123                        break;
2124                default:
2125                        netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n");
2126                        break;
2127                }
2128        }
2129}
2130
2131static inline void
2132jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2133{
2134        if (vlan_tx_tag_present(skb)) {
2135                *flags |= TXFLAG_TAGON;
2136                *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2137        }
2138}
2139
2140static int
2141jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2142{
2143        struct jme_ring *txring = &(jme->txring[0]);
2144        struct txdesc *txdesc;
2145        struct jme_buffer_info *txbi;
2146        u8 flags;
2147
2148        txdesc = (struct txdesc *)txring->desc + idx;
2149        txbi = txring->bufinf + idx;
2150
2151        txdesc->dw[0] = 0;
2152        txdesc->dw[1] = 0;
2153        txdesc->dw[2] = 0;
2154        txdesc->dw[3] = 0;
2155        txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2156        /*
2157         * Set the OWN bit last.  If the kernel queues packets faster
2158         * than the NIC sends them, the NIC could otherwise start on
2159         * this descriptor before we are ready to tell it to begin
2160         * transmitting this TX queue.
2161         * All other fields are already filled in correctly.
2162         */
2163        wmb();
2164        flags = TXFLAG_OWN | TXFLAG_INT;
2165        /*
2166         * Set checksum flags only when not doing TSO
2167         */
2168        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2169                jme_tx_csum(jme, skb, &flags);
2170        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2171        jme_map_tx_skb(jme, skb, idx);
2172        txdesc->desc1.flags = flags;
2173        /*
2174         * Set the tx buffer info after telling the NIC to send,
2175         * for better tx_clean timing
2176         */
2177        wmb();
2178        txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2179        txbi->skb = skb;
2180        txbi->len = skb->len;
2181        txbi->start_xmit = jiffies;
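        /* jiffies can legitimately be 0; substitute a non-zero sentinel */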
2182        if (!txbi->start_xmit)
2183                txbi->start_xmit = (0UL-1);
2184
2185        return 0;
2186}
2187
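/*
 * Queue flow control is hysteretic: with the defaults set in
 * jme_init_one() (tx_ring_size 1024, tx_wake_threshold 512), the queue
 * is stopped once fewer than MAX_SKB_FRAGS + 2 free descriptors remain
 * and is only woken again once at least 512 are free.
 */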
2188static void
2189jme_stop_queue_if_full(struct jme_adapter *jme)
2190{
2191        struct jme_ring *txring = &(jme->txring[0]);
2192        struct jme_buffer_info *txbi = txring->bufinf;
2193        int idx = atomic_read(&txring->next_to_clean);
2194
2195        txbi += idx;
2196
2197        smp_wmb();
2198        if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2199                netif_stop_queue(jme->dev);
2200                netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2201                smp_wmb();
2202                if (atomic_read(&txring->nr_free)
2203                        >= (jme->tx_wake_threshold)) {
2204                        netif_wake_queue(jme->dev);
2205                        netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
2206                }
2207        }
2208
2209        if (unlikely(txbi->start_xmit &&
2210                        (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
2211                        txbi->skb)) {
2212                netif_stop_queue(jme->dev);
2213                netif_info(jme, tx_queued, jme->dev,
2214                           "TX Queue Stopped %d@%lu\n", idx, jiffies);
2215        }
2216}
2217
2218/*
2219 * This function is already protected by netif_tx_lock()
2220 */
2221
2222static netdev_tx_t
2223jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2224{
2225        struct jme_adapter *jme = netdev_priv(netdev);
2226        int idx;
2227
2228        if (unlikely(jme_expand_header(jme, skb))) {
2229                ++(NET_STAT(jme).tx_dropped);
2230                return NETDEV_TX_OK;
2231        }
2232
2233        idx = jme_alloc_txdesc(jme, skb);
2234
2235        if (unlikely(idx < 0)) {
2236                netif_stop_queue(netdev);
2237                netif_err(jme, tx_err, jme->dev,
2238                          "BUG! Tx ring full when queue awake!\n");
2239
2240                return NETDEV_TX_BUSY;
2241        }
2242
2243        jme_fill_tx_desc(jme, skb, idx);
2244
2245        jwrite32(jme, JME_TXCS, jme->reg_txcs |
2246                                TXCS_SELECT_QUEUE0 |
2247                                TXCS_QUEUE0S |
2248                                TXCS_ENABLE);
2249
2250        tx_dbg(jme, "xmit: %d+%d@%lu\n",
2251               idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2252        jme_stop_queue_if_full(jme);
2253
2254        return NETDEV_TX_OK;
2255}
2256
2257static void
2258jme_set_unicastaddr(struct net_device *netdev)
2259{
2260        struct jme_adapter *jme = netdev_priv(netdev);
2261        u32 val;
2262
2263        val = (netdev->dev_addr[3] & 0xff) << 24 |
2264              (netdev->dev_addr[2] & 0xff) << 16 |
2265              (netdev->dev_addr[1] & 0xff) <<  8 |
2266              (netdev->dev_addr[0] & 0xff);
2267        jwrite32(jme, JME_RXUMA_LO, val);
2268        val = (netdev->dev_addr[5] & 0xff) << 8 |
2269              (netdev->dev_addr[4] & 0xff);
2270        jwrite32(jme, JME_RXUMA_HI, val);
2271}
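
/*
 * The station address is packed little-endian into the two filter
 * registers, e.g. for 00:1b:2c:3d:4e:5f this writes 0x3d2c1b00 to
 * JME_RXUMA_LO and 0x5f4e to JME_RXUMA_HI.
 */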
2272
2273static int
2274jme_set_macaddr(struct net_device *netdev, void *p)
2275{
2276        struct jme_adapter *jme = netdev_priv(netdev);
2277        struct sockaddr *addr = p;
2278
2279        if (netif_running(netdev))
2280                return -EBUSY;
2281
2282        spin_lock_bh(&jme->macaddr_lock);
2283        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2284        jme_set_unicastaddr(netdev);
2285        spin_unlock_bh(&jme->macaddr_lock);
2286
2287        return 0;
2288}
2289
2290static void
2291jme_set_multi(struct net_device *netdev)
2292{
2293        struct jme_adapter *jme = netdev_priv(netdev);
2294        u32 mc_hash[2] = {};
2295
2296        spin_lock_bh(&jme->rxmcs_lock);
2297
2298        jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2299
2300        if (netdev->flags & IFF_PROMISC) {
2301                jme->reg_rxmcs |= RXMCS_ALLFRAME;
2302        } else if (netdev->flags & IFF_ALLMULTI) {
2303                jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2304        } else if (netdev->flags & IFF_MULTICAST) {
2305                struct netdev_hw_addr *ha;
2306                int bit_nr;
2307
2308                jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2309                netdev_for_each_mc_addr(ha, netdev) {
2310                        bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2311                        mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2312                }
2313
2314                jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2315                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
2316        }
2317
2318        wmb();
2319        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2320
2321        spin_unlock_bh(&jme->rxmcs_lock);
2322}
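
/*
 * The 6-bit CRC value above indexes a 64-bit hash split across two
 * 32-bit registers: bit_nr >> 5 selects the register and bit_nr & 0x1F
 * the bit within it, so e.g. bit_nr 42 sets bit 10 of JME_RXMCHT_HI.
 */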
2323
2324static int
2325jme_change_mtu(struct net_device *netdev, int new_mtu)
2326{
2327        struct jme_adapter *jme = netdev_priv(netdev);
2328
2329        if (new_mtu == jme->old_mtu)
2330                return 0;
2331
2332        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2333                ((new_mtu) < IPV6_MIN_MTU))
2334                return -EINVAL;
2335
2337        netdev->mtu = new_mtu;
2338        netdev_update_features(netdev);
2339
2340        jme_restart_rx_engine(jme);
2341        jme_reset_link(jme);
2342
2343        return 0;
2344}
2345
2346static void
2347jme_tx_timeout(struct net_device *netdev)
2348{
2349        struct jme_adapter *jme = netdev_priv(netdev);
2350
2351        jme->phylink = 0;
2352        jme_reset_phy_processor(jme);
2353        if (test_bit(JME_FLAG_SSET, &jme->flags))
2354                jme_set_settings(netdev, &jme->old_ecmd);
2355
2356        /*
2357         * Force the link to reset again
2358         */
2359        jme_reset_link(jme);
2360}
2361
2362static inline void jme_pause_rx(struct jme_adapter *jme)
2363{
2364        atomic_dec(&jme->link_changing);
2365
2366        jme_set_rx_pcc(jme, PCC_OFF);
2367        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2368                JME_NAPI_DISABLE(jme);
2369        } else {
2370                tasklet_disable(&jme->rxclean_task);
2371                tasklet_disable(&jme->rxempty_task);
2372        }
2373}
2374
2375static inline void jme_resume_rx(struct jme_adapter *jme)
2376{
2377        struct dynpcc_info *dpi = &(jme->dpi);
2378
2379        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2380                JME_NAPI_ENABLE(jme);
2381        } else {
2382                tasklet_hi_enable(&jme->rxclean_task);
2383                tasklet_hi_enable(&jme->rxempty_task);
2384        }
2385        dpi->cur                = PCC_P1;
2386        dpi->attempt            = PCC_P1;
2387        dpi->cnt                = 0;
2388        jme_set_rx_pcc(jme, PCC_P1);
2389
2390        atomic_inc(&jme->link_changing);
2391}
2392
2393static void
2394jme_get_drvinfo(struct net_device *netdev,
2395                     struct ethtool_drvinfo *info)
2396{
2397        struct jme_adapter *jme = netdev_priv(netdev);
2398
2399        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2400        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2401        strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
2402}
2403
2404static int
2405jme_get_regs_len(struct net_device *netdev)
2406{
2407        return JME_REG_LEN;
2408}
2409
2410static void
2411mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2412{
2413        int i;
2414
2415        for (i = 0 ; i < len ; i += 4)
2416                p[i >> 2] = jread32(jme, reg + i);
2417}
2418
2419static void
2420mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2421{
2422        int i;
2423        u16 *p16 = (u16 *)p;
2424
2425        for (i = 0 ; i < reg_nr ; ++i)
2426                p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2427}
2428
2429static void
2430jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2431{
2432        struct jme_adapter *jme = netdev_priv(netdev);
2433        u32 *p32 = (u32 *)p;
2434
2435        memset(p, 0xFF, JME_REG_LEN);
2436
2437        regs->version = 1;
2438        mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2439
2440        p32 += 0x100 >> 2;
2441        mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2442
2443        p32 += 0x100 >> 2;
2444        mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2445
2446        p32 += 0x100 >> 2;
2447        mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2448
2449        p32 += 0x100 >> 2;
2450        mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2451}
2452
2453static int
2454jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2455{
2456        struct jme_adapter *jme = netdev_priv(netdev);
2457
2458        ecmd->tx_coalesce_usecs = PCC_TX_TO;
2459        ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2460
2461        if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2462                ecmd->use_adaptive_rx_coalesce = false;
2463                ecmd->rx_coalesce_usecs = 0;
2464                ecmd->rx_max_coalesced_frames = 0;
2465                return 0;
2466        }
2467
2468        ecmd->use_adaptive_rx_coalesce = true;
2469
2470        switch (jme->dpi.cur) {
2471        case PCC_P1:
2472                ecmd->rx_coalesce_usecs = PCC_P1_TO;
2473                ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2474                break;
2475        case PCC_P2:
2476                ecmd->rx_coalesce_usecs = PCC_P2_TO;
2477                ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2478                break;
2479        case PCC_P3:
2480                ecmd->rx_coalesce_usecs = PCC_P3_TO;
2481                ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2482                break;
2483        default:
2484                break;
2485        }
2486
2487        return 0;
2488}
2489
2490static int
2491jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2492{
2493        struct jme_adapter *jme = netdev_priv(netdev);
2494        struct dynpcc_info *dpi = &(jme->dpi);
2495
2496        if (netif_running(netdev))
2497                return -EBUSY;
2498
2499        if (ecmd->use_adaptive_rx_coalesce &&
2500            test_bit(JME_FLAG_POLL, &jme->flags)) {
2501                clear_bit(JME_FLAG_POLL, &jme->flags);
2502                jme->jme_rx = netif_rx;
2503                dpi->cur                = PCC_P1;
2504                dpi->attempt            = PCC_P1;
2505                dpi->cnt                = 0;
2506                jme_set_rx_pcc(jme, PCC_P1);
2507                jme_interrupt_mode(jme);
2508        } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2509                   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2510                set_bit(JME_FLAG_POLL, &jme->flags);
2511                jme->jme_rx = netif_receive_skb;
2512                jme_interrupt_mode(jme);
2513        }
2514
2515        return 0;
2516}
2517
2518static void
2519jme_get_pauseparam(struct net_device *netdev,
2520                        struct ethtool_pauseparam *ecmd)
2521{
2522        struct jme_adapter *jme = netdev_priv(netdev);
2523        u32 val;
2524
2525        ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2526        ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2527
2528        spin_lock_bh(&jme->phy_lock);
2529        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2530        spin_unlock_bh(&jme->phy_lock);
2531
2532        ecmd->autoneg =
2533                (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2534}
2535
2536static int
2537jme_set_pauseparam(struct net_device *netdev,
2538                        struct ethtool_pauseparam *ecmd)
2539{
2540        struct jme_adapter *jme = netdev_priv(netdev);
2541        u32 val;
2542
2543        if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2544                (ecmd->tx_pause != 0)) {
2545
2546                if (ecmd->tx_pause)
2547                        jme->reg_txpfc |= TXPFC_PF_EN;
2548                else
2549                        jme->reg_txpfc &= ~TXPFC_PF_EN;
2550
2551                jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2552        }
2553
2554        spin_lock_bh(&jme->rxmcs_lock);
2555        if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2556                (ecmd->rx_pause != 0)) {
2557
2558                if (ecmd->rx_pause)
2559                        jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2560                else
2561                        jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2562
2563                jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2564        }
2565        spin_unlock_bh(&jme->rxmcs_lock);
2566
2567        spin_lock_bh(&jme->phy_lock);
2568        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2569        if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2570                (ecmd->autoneg != 0)) {
2571
2572                if (ecmd->autoneg)
2573                        val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2574                else
2575                        val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2576
2577                jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2578                                MII_ADVERTISE, val);
2579        }
2580        spin_unlock_bh(&jme->phy_lock);
2581
2582        return 0;
2583}
2584
2585static void
2586jme_get_wol(struct net_device *netdev,
2587                struct ethtool_wolinfo *wol)
2588{
2589        struct jme_adapter *jme = netdev_priv(netdev);
2590
2591        wol->supported = WAKE_MAGIC | WAKE_PHY;
2592
2593        wol->wolopts = 0;
2594
2595        if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2596                wol->wolopts |= WAKE_PHY;
2597
2598        if (jme->reg_pmcs & PMCS_MFEN)
2599                wol->wolopts |= WAKE_MAGIC;
2601}
2602
2603static int
2604jme_set_wol(struct net_device *netdev,
2605                struct ethtool_wolinfo *wol)
2606{
2607        struct jme_adapter *jme = netdev_priv(netdev);
2608
2609        if (wol->wolopts & (WAKE_MAGICSECURE |
2610                                WAKE_UCAST |
2611                                WAKE_MCAST |
2612                                WAKE_BCAST |
2613                                WAKE_ARP))
2614                return -EOPNOTSUPP;
2615
2616        jme->reg_pmcs = 0;
2617
2618        if (wol->wolopts & WAKE_PHY)
2619                jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2620
2621        if (wol->wolopts & WAKE_MAGIC)
2622                jme->reg_pmcs |= PMCS_MFEN;
2623
2624        jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2625        device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2626
2627        return 0;
2628}
2629
2630static int
2631jme_get_settings(struct net_device *netdev,
2632                     struct ethtool_cmd *ecmd)
2633{
2634        struct jme_adapter *jme = netdev_priv(netdev);
2635        int rc;
2636
2637        spin_lock_bh(&jme->phy_lock);
2638        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2639        spin_unlock_bh(&jme->phy_lock);
2640        return rc;
2641}
2642
2643static int
2644jme_set_settings(struct net_device *netdev,
2645                     struct ethtool_cmd *ecmd)
2646{
2647        struct jme_adapter *jme = netdev_priv(netdev);
2648        int rc, fdc = 0;
2649
2650        if (ethtool_cmd_speed(ecmd) == SPEED_1000
2651            && ecmd->autoneg != AUTONEG_ENABLE)
2652                return -EINVAL;
2653
2654        /*
2655         * Check if the user changed only the duplex while forcing the
2656         * media type; hardware would not generate a link change interrupt.
2657         */
2658        if (jme->mii_if.force_media &&
2659        ecmd->autoneg != AUTONEG_ENABLE &&
2660        (jme->mii_if.full_duplex != ecmd->duplex))
2661                fdc = 1;
2662
2663        spin_lock_bh(&jme->phy_lock);
2664        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2665        spin_unlock_bh(&jme->phy_lock);
2666
2667        if (!rc) {
2668                if (fdc)
2669                        jme_reset_link(jme);
2670                jme->old_ecmd = *ecmd;
2671                set_bit(JME_FLAG_SSET, &jme->flags);
2672        }
2673
2674        return rc;
2675}
2676
2677static int
2678jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2679{
2680        int rc;
2681        struct jme_adapter *jme = netdev_priv(netdev);
2682        struct mii_ioctl_data *mii_data = if_mii(rq);
2683        unsigned int duplex_chg;
2684
2685        if (cmd == SIOCSMIIREG) {
2686                u16 val = mii_data->val_in;
2687                if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2688                    (val & BMCR_SPEED1000))
2689                        return -EINVAL;
2690        }
2691
2692        spin_lock_bh(&jme->phy_lock);
2693        rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2694        spin_unlock_bh(&jme->phy_lock);
2695
2696        if (!rc && (cmd == SIOCSMIIREG)) {
2697                if (duplex_chg)
2698                        jme_reset_link(jme);
2699                jme_get_settings(netdev, &jme->old_ecmd);
2700                set_bit(JME_FLAG_SSET, &jme->flags);
2701        }
2702
2703        return rc;
2704}
2705
2706static u32
2707jme_get_link(struct net_device *netdev)
2708{
2709        struct jme_adapter *jme = netdev_priv(netdev);
2710        return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2711}
2712
2713static u32
2714jme_get_msglevel(struct net_device *netdev)
2715{
2716        struct jme_adapter *jme = netdev_priv(netdev);
2717        return jme->msg_enable;
2718}
2719
2720static void
2721jme_set_msglevel(struct net_device *netdev, u32 value)
2722{
2723        struct jme_adapter *jme = netdev_priv(netdev);
2724        jme->msg_enable = value;
2725}
2726
2727static netdev_features_t
2728jme_fix_features(struct net_device *netdev, netdev_features_t features)
2729{
2730        if (netdev->mtu > 1900)
2731                features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
2732        return features;
2733}
2734
2735static int
2736jme_set_features(struct net_device *netdev, netdev_features_t features)
2737{
2738        struct jme_adapter *jme = netdev_priv(netdev);
2739
2740        spin_lock_bh(&jme->rxmcs_lock);
2741        if (features & NETIF_F_RXCSUM)
2742                jme->reg_rxmcs |= RXMCS_CHECKSUM;
2743        else
2744                jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2745        jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2746        spin_unlock_bh(&jme->rxmcs_lock);
2747
2748        return 0;
2749}
2750
2751#ifdef CONFIG_NET_POLL_CONTROLLER
2752static void jme_netpoll(struct net_device *dev)
2753{
2754        unsigned long flags;
2755
2756        local_irq_save(flags);
2757        jme_intr(dev->irq, dev);
2758        local_irq_restore(flags);
2759}
2760#endif
2761
2762static int
2763jme_nway_reset(struct net_device *netdev)
2764{
2765        struct jme_adapter *jme = netdev_priv(netdev);
2766        jme_restart_an(jme);
2767        return 0;
2768}
2769
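/*
 * Both SMB helpers below poll SMBCSR_BUSY and then the self-clearing
 * SMBINTF_HWCMD bit at 1ms intervals, so a stuck bus fails after
 * roughly JME_SMB_BUSY_TIMEOUT milliseconds instead of hanging.
 */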
2770static u8
2771jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2772{
2773        u32 val;
2774        int to;
2775
2776        val = jread32(jme, JME_SMBCSR);
2777        to = JME_SMB_BUSY_TIMEOUT;
2778        while ((val & SMBCSR_BUSY) && --to) {
2779                msleep(1);
2780                val = jread32(jme, JME_SMBCSR);
2781        }
2782        if (!to) {
2783                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2784                return 0xFF;
2785        }
2786
2787        jwrite32(jme, JME_SMBINTF,
2788                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2789                SMBINTF_HWRWN_READ |
2790                SMBINTF_HWCMD);
2791
2792        val = jread32(jme, JME_SMBINTF);
2793        to = JME_SMB_BUSY_TIMEOUT;
2794        while ((val & SMBINTF_HWCMD) && --to) {
2795                msleep(1);
2796                val = jread32(jme, JME_SMBINTF);
2797        }
2798        if (!to) {
2799                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2800                return 0xFF;
2801        }
2802
2803        return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2804}
2805
2806static void
2807jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2808{
2809        u32 val;
2810        int to;
2811
2812        val = jread32(jme, JME_SMBCSR);
2813        to = JME_SMB_BUSY_TIMEOUT;
2814        while ((val & SMBCSR_BUSY) && --to) {
2815                msleep(1);
2816                val = jread32(jme, JME_SMBCSR);
2817        }
2818        if (!to) {
2819                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2820                return;
2821        }
2822
2823        jwrite32(jme, JME_SMBINTF,
2824                ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2825                ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2826                SMBINTF_HWRWN_WRITE |
2827                SMBINTF_HWCMD);
2828
2829        val = jread32(jme, JME_SMBINTF);
2830        to = JME_SMB_BUSY_TIMEOUT;
2831        while ((val & SMBINTF_HWCMD) && --to) {
2832                msleep(1);
2833                val = jread32(jme, JME_SMBINTF);
2834        }
2835        if (!to) {
2836                netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2837                return;
2838        }
2839
2840        mdelay(2);
2841}
2842
2843static int
2844jme_get_eeprom_len(struct net_device *netdev)
2845{
2846        struct jme_adapter *jme = netdev_priv(netdev);
2847        u32 val;
2848        val = jread32(jme, JME_SMBCSR);
2849        return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2850}
2851
2852static int
2853jme_get_eeprom(struct net_device *netdev,
2854                struct ethtool_eeprom *eeprom, u8 *data)
2855{
2856        struct jme_adapter *jme = netdev_priv(netdev);
2857        int i, offset = eeprom->offset, len = eeprom->len;
2858
2859        /*
2860         * ethtool will check the boundary for us
2861         */
2862        eeprom->magic = JME_EEPROM_MAGIC;
2863        for (i = 0 ; i < len ; ++i)
2864                data[i] = jme_smb_read(jme, i + offset);
2865
2866        return 0;
2867}
2868
2869static int
2870jme_set_eeprom(struct net_device *netdev,
2871                struct ethtool_eeprom *eeprom, u8 *data)
2872{
2873        struct jme_adapter *jme = netdev_priv(netdev);
2874        int i, offset = eeprom->offset, len = eeprom->len;
2875
2876        if (eeprom->magic != JME_EEPROM_MAGIC)
2877                return -EINVAL;
2878
2879        /*
2880         * ethtool will check the boundary for us
2881         */
2882        for (i = 0 ; i < len ; ++i)
2883                jme_smb_write(jme, i + offset, data[i]);
2884
2885        return 0;
2886}
2887
2888static const struct ethtool_ops jme_ethtool_ops = {
2889        .get_drvinfo            = jme_get_drvinfo,
2890        .get_regs_len           = jme_get_regs_len,
2891        .get_regs               = jme_get_regs,
2892        .get_coalesce           = jme_get_coalesce,
2893        .set_coalesce           = jme_set_coalesce,
2894        .get_pauseparam         = jme_get_pauseparam,
2895        .set_pauseparam         = jme_set_pauseparam,
2896        .get_wol                = jme_get_wol,
2897        .set_wol                = jme_set_wol,
2898        .get_settings           = jme_get_settings,
2899        .set_settings           = jme_set_settings,
2900        .get_link               = jme_get_link,
2901        .get_msglevel           = jme_get_msglevel,
2902        .set_msglevel           = jme_set_msglevel,
2903        .nway_reset             = jme_nway_reset,
2904        .get_eeprom_len         = jme_get_eeprom_len,
2905        .get_eeprom             = jme_get_eeprom,
2906        .set_eeprom             = jme_set_eeprom,
2907};
2908
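/*
 * Pick the widest DMA mask the platform accepts, trying 64-bit and then
 * 40-bit on the JMC250.  Returns 1 when a high mask was accepted (the
 * caller then sets NETIF_F_HIGHDMA), 0 for plain 32-bit DMA, and -1
 * when no mask could be set at all.
 */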
2909static int
2910jme_pci_dma64(struct pci_dev *pdev)
2911{
2912        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2913            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
2914                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2915                        return 1;
2916
2917        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2918            !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2919                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
2920                        return 1;
2921
2922        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
2923                if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2924                        return 0;
2925
2926        return -1;
2927}
2928
2929static inline void
2930jme_phy_init(struct jme_adapter *jme)
2931{
2932        u16 reg26;
2933
2934        reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2935        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2936}
2937
2938static inline void
2939jme_check_hw_ver(struct jme_adapter *jme)
2940{
2941        u32 chipmode;
2942
2943        chipmode = jread32(jme, JME_CHIPMODE);
2944
2945        jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2946        jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2947        jme->chip_main_rev = jme->chiprev & 0xF;
2948        jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2949}
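
/*
 * chiprev packs the sub revision in its high nibble and the main
 * revision in the low one, so e.g. chiprev 0x23 means chip_main_rev 3
 * and chip_sub_rev 2.
 */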
2950
2951static const struct net_device_ops jme_netdev_ops = {
2952        .ndo_open               = jme_open,
2953        .ndo_stop               = jme_close,
2954        .ndo_validate_addr      = eth_validate_addr,
2955        .ndo_do_ioctl           = jme_ioctl,
2956        .ndo_start_xmit         = jme_start_xmit,
2957        .ndo_set_mac_address    = jme_set_macaddr,
2958        .ndo_set_rx_mode        = jme_set_multi,
2959        .ndo_change_mtu         = jme_change_mtu,
2960        .ndo_tx_timeout         = jme_tx_timeout,
2961        .ndo_fix_features       = jme_fix_features,
2962        .ndo_set_features       = jme_set_features,
2963#ifdef CONFIG_NET_POLL_CONTROLLER
2964        .ndo_poll_controller    = jme_netpoll,
2965#endif
2966};
2967
2968static int __devinit
2969jme_init_one(struct pci_dev *pdev,
2970             const struct pci_device_id *ent)
2971{
2972        int rc = 0, using_dac, i;
2973        struct net_device *netdev;
2974        struct jme_adapter *jme;
2975        u16 bmcr, bmsr;
2976        u32 apmc;
2977
2978        /*
2979         * set up PCI device basics
2980         */
2981        pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2982                               PCIE_LINK_STATE_CLKPM);
2983
2984        rc = pci_enable_device(pdev);
2985        if (rc) {
2986                pr_err("Cannot enable PCI device\n");
2987                goto err_out;
2988        }
2989
2990        using_dac = jme_pci_dma64(pdev);
2991        if (using_dac < 0) {
2992                pr_err("Cannot set PCI DMA Mask\n");
2993                rc = -EIO;
2994                goto err_out_disable_pdev;
2995        }
2996
2997        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2998                pr_err("No PCI resource region found\n");
2999                rc = -ENOMEM;
3000                goto err_out_disable_pdev;
3001        }
3002
3003        rc = pci_request_regions(pdev, DRV_NAME);
3004        if (rc) {
3005                pr_err("Cannot obtain PCI resource region\n");
3006                goto err_out_disable_pdev;
3007        }
3008
3009        pci_set_master(pdev);
3010
3011        /*
3012         * alloc and init net device
3013         */
3014        netdev = alloc_etherdev(sizeof(*jme));
3015        if (!netdev) {
3016                rc = -ENOMEM;
3017                goto err_out_release_regions;
3018        }
3019        netdev->netdev_ops = &jme_netdev_ops;
3020        netdev->ethtool_ops             = &jme_ethtool_ops;
3021        netdev->watchdog_timeo          = TX_TIMEOUT;
3022        netdev->hw_features             =       NETIF_F_IP_CSUM |
3023                                                NETIF_F_IPV6_CSUM |
3024                                                NETIF_F_SG |
3025                                                NETIF_F_TSO |
3026                                                NETIF_F_TSO6 |
3027                                                NETIF_F_RXCSUM;
3028        netdev->features                =       NETIF_F_IP_CSUM |
3029                                                NETIF_F_IPV6_CSUM |
3030                                                NETIF_F_SG |
3031                                                NETIF_F_TSO |
3032                                                NETIF_F_TSO6 |
3033                                                NETIF_F_HW_VLAN_TX |
3034                                                NETIF_F_HW_VLAN_RX;
3035        if (using_dac)
3036                netdev->features        |=      NETIF_F_HIGHDMA;
3037
3038        SET_NETDEV_DEV(netdev, &pdev->dev);
3039        pci_set_drvdata(pdev, netdev);
3040
3041        /*
3042         * init adapter info
3043         */
3044        jme = netdev_priv(netdev);
3045        jme->pdev = pdev;
3046        jme->dev = netdev;
3047        jme->jme_rx = netif_rx;
3048        jme->old_mtu = netdev->mtu = 1500;
3049        jme->phylink = 0;
3050        jme->tx_ring_size = 1 << 10;
3051        jme->tx_ring_mask = jme->tx_ring_size - 1;
3052        jme->tx_wake_threshold = 1 << 9;
3053        jme->rx_ring_size = 1 << 9;
3054        jme->rx_ring_mask = jme->rx_ring_size - 1;
3055        jme->msg_enable = JME_DEF_MSG_ENABLE;
3056        jme->regs = ioremap(pci_resource_start(pdev, 0),
3057                             pci_resource_len(pdev, 0));
3058        if (!(jme->regs)) {
3059                pr_err("Mapping PCI resource region error\n");
3060                rc = -ENOMEM;
3061                goto err_out_free_netdev;
3062        }
3063
3064        if (no_pseudohp) {
3065                apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
3066                jwrite32(jme, JME_APMC, apmc);
3067        } else if (force_pseudohp) {
3068                apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
3069                jwrite32(jme, JME_APMC, apmc);
3070        }
3071
3072        NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
3073
3074        spin_lock_init(&jme->phy_lock);
3075        spin_lock_init(&jme->macaddr_lock);
3076        spin_lock_init(&jme->rxmcs_lock);
3077
3078        atomic_set(&jme->link_changing, 1);
3079        atomic_set(&jme->rx_cleaning, 1);
3080        atomic_set(&jme->tx_cleaning, 1);
3081        atomic_set(&jme->rx_empty, 1);
3082
3083        tasklet_init(&jme->pcc_task,
3084                     jme_pcc_tasklet,
3085                     (unsigned long) jme);
3086        jme->dpi.cur = PCC_P1;
3087
3088        jme->reg_ghc = 0;
3089        jme->reg_rxcs = RXCS_DEFAULT;
3090        jme->reg_rxmcs = RXMCS_DEFAULT;
3091        jme->reg_txpfc = 0;
3092        jme->reg_pmcs = PMCS_MFEN;
3093        jme->reg_gpreg1 = GPREG1_DEFAULT;
3094
3095        if (jme->reg_rxmcs & RXMCS_CHECKSUM)
3096                netdev->features |= NETIF_F_RXCSUM;
3097
3098        /*
3099         * Get Max Read Req Size from PCI Config Space
3100         */
3101        pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
3102        jme->mrrs &= PCI_DCSR_MRRS_MASK;
3103        switch (jme->mrrs) {
3104        case MRRS_128B:
3105                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
3106                break;
3107        case MRRS_256B:
3108                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
3109                break;
3110        default:
3111                jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
3112                break;
3113        }
3114
3115        /*
3116         * The hardware version must be checked before reset_mac_processor
3117         */
3118        jme_check_hw_ver(jme);
3119        jme->mii_if.dev = netdev;
3120        if (jme->fpgaver) {
3121                jme->mii_if.phy_id = 0;
3122                for (i = 1 ; i < 32 ; ++i) {
3123                        bmcr = jme_mdio_read(netdev, i, MII_BMCR);
3124                        bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3125                        if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3126                                jme->mii_if.phy_id = i;
3127                                break;
3128                        }
3129                }
3130
3131                if (!jme->mii_if.phy_id) {
3132                        rc = -EIO;
3133                        pr_err("Can not find phy_id\n");
3134                        goto err_out_unmap;
3135                }
3136
3137                jme->reg_ghc |= GHC_LINK_POLL;
3138        } else {
3139                jme->mii_if.phy_id = 1;
3140        }
3141        jme->mii_if.supports_gmii =
3142                (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250);
3145        jme->mii_if.phy_id_mask = 0x1F;
3146        jme->mii_if.reg_num_mask = 0x1F;
3147        jme->mii_if.mdio_read = jme_mdio_read;
3148        jme->mii_if.mdio_write = jme_mdio_write;
3149
3150        jme_clear_pm(jme);
3151        pci_set_power_state(jme->pdev, PCI_D0);
3152        device_set_wakeup_enable(&pdev->dev, true);
3153
3154        jme_set_phyfifo_5level(jme);
3155        jme->pcirev = pdev->revision;
3156        if (!jme->fpgaver)
3157                jme_phy_init(jme);
3158        jme_phy_off(jme);
3159
3160        /*
3161         * Reset MAC processor and reload EEPROM for MAC Address
3162         */
3163        jme_reset_mac_processor(jme);
3164        rc = jme_reload_eeprom(jme);
3165        if (rc) {
3166                pr_err("Reload eeprom for reading MAC Address error\n");
3167                goto err_out_unmap;
3168        }
3169        jme_load_macaddr(netdev);
3170
3171        /*
3172         * Tell stack that we are not ready to work until open()
3173         */
3174        netif_carrier_off(netdev);
3175
3176        rc = register_netdev(netdev);
3177        if (rc) {
3178                pr_err("Cannot register net device\n");
3179                goto err_out_unmap;
3180        }
3181
3182        netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3183                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
3184                   "JMC250 Gigabit Ethernet" :
3185                   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
3186                   "JMC260 Fast Ethernet" : "Unknown",
3187                   (jme->fpgaver != 0) ? " (FPGA)" : "",
3188                   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
3189                   jme->pcirev, netdev->dev_addr);
3190
3191        return 0;
3192
3193err_out_unmap:
3194        iounmap(jme->regs);
3195err_out_free_netdev:
3196        pci_set_drvdata(pdev, NULL);
3197        free_netdev(netdev);
3198err_out_release_regions:
3199        pci_release_regions(pdev);
3200err_out_disable_pdev:
3201        pci_disable_device(pdev);
3202err_out:
3203        return rc;
3204}
3205
3206static void __devexit
3207jme_remove_one(struct pci_dev *pdev)
3208{
3209        struct net_device *netdev = pci_get_drvdata(pdev);
3210        struct jme_adapter *jme = netdev_priv(netdev);
3211
3212        unregister_netdev(netdev);
3213        iounmap(jme->regs);
3214        pci_set_drvdata(pdev, NULL);
3215        free_netdev(netdev);
3216        pci_release_regions(pdev);
3217        pci_disable_device(pdev);
3219}
3220
3221static void
3222jme_shutdown(struct pci_dev *pdev)
3223{
3224        struct net_device *netdev = pci_get_drvdata(pdev);
3225        struct jme_adapter *jme = netdev_priv(netdev);
3226
3227        jme_powersave_phy(jme);
3228        pci_pme_active(pdev, true);
3229}
3230
3231#ifdef CONFIG_PM_SLEEP
3232static int
3233jme_suspend(struct device *dev)
3234{
3235        struct pci_dev *pdev = to_pci_dev(dev);
3236        struct net_device *netdev = pci_get_drvdata(pdev);
3237        struct jme_adapter *jme = netdev_priv(netdev);
3238
3239        if (!netif_running(netdev))
3240                return 0;
3241
3242        atomic_dec(&jme->link_changing);
3243
3244        netif_device_detach(netdev);
3245        netif_stop_queue(netdev);
3246        jme_stop_irq(jme);
3247
3248        tasklet_disable(&jme->txclean_task);
3249        tasklet_disable(&jme->rxclean_task);
3250        tasklet_disable(&jme->rxempty_task);
3251
3252        if (netif_carrier_ok(netdev)) {
3253                if (test_bit(JME_FLAG_POLL, &jme->flags))
3254                        jme_polling_mode(jme);
3255
3256                jme_stop_pcc_timer(jme);
3257                jme_disable_rx_engine(jme);
3258                jme_disable_tx_engine(jme);
3259                jme_reset_mac_processor(jme);
3260                jme_free_rx_resources(jme);
3261                jme_free_tx_resources(jme);
3262                netif_carrier_off(netdev);
3263                jme->phylink = 0;
3264        }
3265
3266        tasklet_enable(&jme->txclean_task);
3267        tasklet_hi_enable(&jme->rxclean_task);
3268        tasklet_hi_enable(&jme->rxempty_task);
3269
3270        jme_powersave_phy(jme);
3271
3272        return 0;
3273}
3274
3275static int
3276jme_resume(struct device *dev)
3277{
3278        struct pci_dev *pdev = to_pci_dev(dev);
3279        struct net_device *netdev = pci_get_drvdata(pdev);
3280        struct jme_adapter *jme = netdev_priv(netdev);
3281
3282        if (!netif_running(netdev))
3283                return 0;
3284
3285        jme_clear_pm(jme);
3286        jme_phy_on(jme);
3287        if (test_bit(JME_FLAG_SSET, &jme->flags))
3288                jme_set_settings(netdev, &jme->old_ecmd);
3289        else
3290                jme_reset_phy_processor(jme);
3291        jme_phy_calibration(jme);
3292        jme_phy_setEA(jme);
3293        jme_start_irq(jme);
3294        netif_device_attach(netdev);
3295
3296        atomic_inc(&jme->link_changing);
3297
3298        jme_reset_link(jme);
3299
3300        return 0;
3301}
3302
3303static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3304#define JME_PM_OPS (&jme_pm_ops)
3305
3306#else
3307
3308#define JME_PM_OPS NULL
3309#endif
3310
3311static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
3312        { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
3313        { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3314        { }
3315};
3316
3317static struct pci_driver jme_driver = {
3318        .name           = DRV_NAME,
3319        .id_table       = jme_pci_tbl,
3320        .probe          = jme_init_one,
3321        .remove         = __devexit_p(jme_remove_one),
3322        .shutdown       = jme_shutdown,
3323        .driver.pm      = JME_PM_OPS,
3324};
3325
3326static int __init
3327jme_init_module(void)
3328{
3329        pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3330        return pci_register_driver(&jme_driver);
3331}
3332
3333static void __exit
3334jme_cleanup_module(void)
3335{
3336        pci_unregister_driver(&jme_driver);
3337}
3338
3339module_init(jme_init_module);
3340module_exit(jme_cleanup_module);
3341
3342MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
3343MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3344MODULE_LICENSE("GPL");
3345MODULE_VERSION(DRV_VERSION);
3346MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3347