linux/drivers/net/amd8111e.c
   1
   2/* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
   3 * Copyright (C) 2004 Advanced Micro Devices
   4 *
   5 *
   6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
   7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
   8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
   9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
  10 * Copyright 1993 United States Government as represented by the
  11 *      Director, National Security Agency.[ pcnet32.c ]
  12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
  13 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  14 *
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 *
  21 * This program is distributed in the hope that it will be useful,
  22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  24 * GNU General Public License for more details.
  25 *
  26 * You should have received a copy of the GNU General Public License
  27 * along with this program; if not, write to the Free Software
  28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
  29 * USA
  30
  31Module Name:
  32
  33        amd8111e.c
  34
  35Abstract:
  36
  37         AMD8111 based 10/100 Ethernet Controller Driver.
  38
  39Environment:
  40
  41        Kernel Mode
  42
  43Revision History:
  44        3.0.0
  45           Initial Revision.
  46        3.0.1
  47         1. Dynamic interrupt coalescing.
  48         2. Removed prev_stats.
  49         3. MII support.
  50         4. Dynamic IPG support
  51        3.0.2  05/29/2003
  52         1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
  53         2. Bug fix: Fixed VLAN support failure.
  54         3. Bug fix: Fixed receive interrupt coalescing bug.
  55         4. Dynamic IPG support is disabled by default.
  56        3.0.3 06/05/2003
  57         1. Bug fix: Fixed failure to close the interface if SMP is enabled.
  58        3.0.4 12/09/2003
  59         1. Added set_mac_address routine for bonding driver support.
  60         2. Tested the driver for bonding support
  61         3. Bug fix: Fixed mismatch in actual receive buffer length and length
  62            indicated to the h/w.
  63         4. Modified amd8111e_rx() routine to receive all the received packets
  64            in the first interrupt.
  65         5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
  66        3.0.5 03/22/2004
  67         1. Added NAPI support
  68
  69*/
  70
  71
  72#include <linux/module.h>
  73#include <linux/kernel.h>
  74#include <linux/types.h>
  75#include <linux/compiler.h>
  76#include <linux/slab.h>
  77#include <linux/delay.h>
  78#include <linux/init.h>
  79#include <linux/ioport.h>
  80#include <linux/pci.h>
  81#include <linux/netdevice.h>
  82#include <linux/etherdevice.h>
  83#include <linux/skbuff.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/if_vlan.h>
  87#include <linux/ctype.h>
  88#include <linux/crc32.h>
  89#include <linux/dma-mapping.h>
  90
  91#include <asm/system.h>
  92#include <asm/io.h>
  93#include <asm/byteorder.h>
  94#include <asm/uaccess.h>
  95
  96#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  97#define AMD8111E_VLAN_TAG_USED 1
  98#else
  99#define AMD8111E_VLAN_TAG_USED 0
 100#endif
 101
 102#include "amd8111e.h"
 103#define MODULE_NAME     "amd8111e"
 104#define MODULE_VERS     "3.0.7"
 105MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 106MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
 107MODULE_LICENSE("GPL");
 108MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 109module_param_array(speed_duplex, int, NULL, 0);
 110MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
 111module_param_array(coalesce, bool, NULL, 0);
 112MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
 113module_param_array(dynamic_ipg, bool, NULL, 0);
 114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
 115
 116static struct pci_device_id amd8111e_pci_tbl[] = {
 117
 118        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
 119         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 120        { 0, }
 121
 122};
 123/*
 124This function will read the PHY registers.
 125*/
 126static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
 127{
 128        void __iomem *mmio = lp->mmio;
 129        unsigned int reg_val;
 130        unsigned int repeat= REPEAT_CNT;
 131
 132        reg_val = readl(mmio + PHY_ACCESS);
 133        while (reg_val & PHY_CMD_ACTIVE)
 134                reg_val = readl( mmio + PHY_ACCESS );
 135
 136        writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
 137                           ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
 138        do{
 139                reg_val = readl(mmio + PHY_ACCESS);
 140                udelay(30);  /* It takes 30 us to read/write data */
 141        } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
 142        if(reg_val & PHY_RD_ERR)
 143                goto err_phy_read;
 144
 145        *val = reg_val & 0xffff;
 146        return 0;
 147err_phy_read:
 148        *val = 0;
 149        return -EINVAL;
 150
 151}
 152
 153/*
 154This function will write into PHY registers.
 155*/
 156static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
 157{
 158        unsigned int repeat = REPEAT_CNT;
 159        void __iomem *mmio = lp->mmio;
 160        unsigned int reg_val;
 161
 162        reg_val = readl(mmio + PHY_ACCESS);
 163        while (reg_val & PHY_CMD_ACTIVE)
 164                reg_val = readl( mmio + PHY_ACCESS );
 165
 166        writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
 167                           ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
 168
 169        do{
 170                reg_val = readl(mmio + PHY_ACCESS);
 171                udelay(30);  /* It takes 30 us to read/write the data */
 172        } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
 173
 174        if(reg_val & PHY_RD_ERR)
 175                goto err_phy_write;
 176
 177        return 0;
 178
 179err_phy_write:
 180        return -EINVAL;
 181
 182}
 183/*
 184This is the mii register read function provided to the mii interface.
 185*/
 186static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
 187{
 188        struct amd8111e_priv* lp = netdev_priv(dev);
 189        unsigned int reg_val;
 190
 191        amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
 192        return reg_val;
 193
 194}
 195
 196/*
 197This is the mii register write function provided to the mii interface.
 198*/
 199static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
 200{
 201        struct amd8111e_priv* lp = netdev_priv(dev);
 202
 203        amd8111e_write_phy(lp, phy_id, reg_num, val);
 204}
 205
 206/*
 207This function will set the PHY speed. During initialization it sets the speed to 100 Mbps full duplex.
 208*/
 209static void amd8111e_set_ext_phy(struct net_device *dev)
 210{
 211        struct amd8111e_priv *lp = netdev_priv(dev);
 212        u32 bmcr,advert,tmp;
 213
 214        /* Determine mii register values to set the speed */
 215        advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
 216        tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
 217        switch (lp->ext_phy_option){
 218
 219                default:
 220                case SPEED_AUTONEG: /* advertise all values */
 221                        tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
 222                                ADVERTISE_100HALF|ADVERTISE_100FULL) ;
 223                        break;
 224                case SPEED10_HALF:
 225                        tmp |= ADVERTISE_10HALF;
 226                        break;
 227                case SPEED10_FULL:
 228                        tmp |= ADVERTISE_10FULL;
 229                        break;
 230                case SPEED100_HALF:
 231                        tmp |= ADVERTISE_100HALF;
 232                        break;
 233                case SPEED100_FULL:
 234                        tmp |= ADVERTISE_100FULL;
 235                        break;
 236        }
 237
 238        if(advert != tmp)
 239                amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
 240        /* Restart auto negotiation */
 241        bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
 242        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 243        amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
 244
 245}
 246
 247/*
 248This function will unmap skb->data space and will free
 249all transmit and receive skbuffs.
 250*/
 251static int amd8111e_free_skbs(struct net_device *dev)
 252{
 253        struct amd8111e_priv *lp = netdev_priv(dev);
 254        struct sk_buff* rx_skbuff;
 255        int i;
 256
 257        /* Freeing transmit skbs */
 258        for(i = 0; i < NUM_TX_BUFFERS; i++){
 259                if(lp->tx_skbuff[i]){
 260                        pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
 261                        dev_kfree_skb (lp->tx_skbuff[i]);
 262                        lp->tx_skbuff[i] = NULL;
 263                        lp->tx_dma_addr[i] = 0;
 264                }
 265        }
 266        /* Freeing previously allocated receive buffers */
 267        for (i = 0; i < NUM_RX_BUFFERS; i++){
 268                rx_skbuff = lp->rx_skbuff[i];
 269                if(rx_skbuff != NULL){
 270                        pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
 271                                  lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
 272                        dev_kfree_skb(lp->rx_skbuff[i]);
 273                        lp->rx_skbuff[i] = NULL;
 274                        lp->rx_dma_addr[i] = 0;
 275                }
 276        }
 277
 278        return 0;
 279}
 280
 281/*
 282This will set the receive buffer length corresponding to the MTU size of the network interface.
 283*/
 284static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
 285{
 286        struct amd8111e_priv* lp = netdev_priv(dev);
 287        unsigned int mtu = dev->mtu;
 288
 289        if (mtu > ETH_DATA_LEN){
 290                /* MTU + ethernet header + FCS
 291                + optional VLAN tag + skb reserve space 2 */
 292
 293                lp->rx_buff_len = mtu + ETH_HLEN + 10;
 294                lp->options |= OPTION_JUMBO_ENABLE;
 295        } else{
 296                lp->rx_buff_len = PKT_BUFF_SZ;
 297                lp->options &= ~OPTION_JUMBO_ENABLE;
 298        }
 299}
 300
 301/*
 302This function will free all the previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
 303 */
 304static int amd8111e_init_ring(struct net_device *dev)
 305{
 306        struct amd8111e_priv *lp = netdev_priv(dev);
 307        int i;
 308
 309        lp->rx_idx = lp->tx_idx = 0;
 310        lp->tx_complete_idx = 0;
 311        lp->tx_ring_idx = 0;
 312
 313
 314        if(lp->opened)
 315                /* Free previously allocated transmit and receive skbs */
 316                amd8111e_free_skbs(dev);
 317
 318        else{
 319                 /* allocate the tx and rx descriptors */
 320                if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
 321                        sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
 322                        &lp->tx_ring_dma_addr)) == NULL)
 323
 324                        goto err_no_mem;
 325
 326                if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
 327                        sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
 328                        &lp->rx_ring_dma_addr)) == NULL)
 329
 330                        goto err_free_tx_ring;
 331
 332        }
 333        /* Set new receive buff size */
 334        amd8111e_set_rx_buff_len(dev);
 335
 336        /* Allocating receive  skbs */
 337        for (i = 0; i < NUM_RX_BUFFERS; i++) {
 338
 339                if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
 340                                /* Release previously allocated skbs */
 341                                for(--i; i >= 0 ;i--)
 342                                        dev_kfree_skb(lp->rx_skbuff[i]);
 343                                goto err_free_rx_ring;
 344                }
 345                skb_reserve(lp->rx_skbuff[i],2);
 346        }
 347        /* Initializing receive descriptors */
 348        for (i = 0; i < NUM_RX_BUFFERS; i++) {
 349                lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
 350                        lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
 351
 352                lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
 353                lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
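                /* Make the buffer address and length visible before the OWN
                   bit is set below; the wmb() keeps the controller from
                   seeing a half-initialized descriptor. */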
 354                wmb();
 355                lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
 356        }
 357
 358        /* Initializing transmit descriptors */
 359        for (i = 0; i < NUM_TX_RING_DR; i++) {
 360                lp->tx_ring[i].buff_phy_addr = 0;
 361                lp->tx_ring[i].tx_flags = 0;
 362                lp->tx_ring[i].buff_count = 0;
 363        }
 364
 365        return 0;
 366
 367err_free_rx_ring:
 368
 369        pci_free_consistent(lp->pci_dev,
 370                sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
 371                lp->rx_ring_dma_addr);
 372
 373err_free_tx_ring:
 374
 375        pci_free_consistent(lp->pci_dev,
 376                 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
 377                 lp->tx_ring_dma_addr);
 378
 379err_no_mem:
 380        return -ENOMEM;
 381}
 382/* This function will set the interrupt coalescing according to the input arguments */
 383static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
 384{
 385        unsigned int timeout;
 386        unsigned int event_count;
 387
 388        struct amd8111e_priv *lp = netdev_priv(dev);
 389        void __iomem *mmio = lp->mmio;
 390        struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
 391
 392
 393        switch(cmod)
 394        {
 395                case RX_INTR_COAL :
 396                        timeout = coal_conf->rx_timeout;
 397                        event_count = coal_conf->rx_event_count;
 398                        if( timeout > MAX_TIMEOUT ||
 399                                        event_count > MAX_EVENT_COUNT )
 400                        return -EINVAL;
 401
 402                        timeout = timeout * DELAY_TIMER_CONV;
 403                        writel(VAL0|STINTEN, mmio+INTEN0);
 404                        writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
 405                                                        mmio+DLY_INT_A);
 406                        break;
 407
 408                case TX_INTR_COAL :
 409                        timeout = coal_conf->tx_timeout;
 410                        event_count = coal_conf->tx_event_count;
 411                        if( timeout > MAX_TIMEOUT ||
 412                                        event_count > MAX_EVENT_COUNT )
 413                        return -EINVAL;
 414
 415
 416                        timeout = timeout * DELAY_TIMER_CONV;
 417                        writel(VAL0|STINTEN,mmio+INTEN0);
 418                        writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
 419                                                         mmio+DLY_INT_B);
 420                        break;
 421
 422                case DISABLE_COAL:
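                        /* Stop the soft timer, mask its interrupt and clear
                           both delayed-interrupt registers. */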
 423                        writel(0,mmio+STVAL);
 424                        writel(STINTEN, mmio+INTEN0);
 425                        writel(0, mmio +DLY_INT_B);
 426                        writel(0, mmio+DLY_INT_A);
 427                        break;
 428                 case ENABLE_COAL:
 429                       /* Start the timer */
 430                        writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
 431                        writel(VAL0|STINTEN, mmio+INTEN0);
 432                        break;
 433                default:
 434                        break;
 435
 436   }
 437        return 0;
 438
 439}
 440
 441/*
 442This function initializes the device registers  and starts the device.
 443*/
 444static int amd8111e_restart(struct net_device *dev)
 445{
 446        struct amd8111e_priv *lp = netdev_priv(dev);
 447        void __iomem *mmio = lp->mmio;
 448        int i,reg_val;
 449
 450        /* stop the chip */
 451         writel(RUN, mmio + CMD0);
 452
 453        if(amd8111e_init_ring(dev))
 454                return -ENOMEM;
 455
 456        /* enable the port manager and set auto negotiation always */
 457        writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
 458        writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
 459
 460        amd8111e_set_ext_phy(dev);
 461
 462        /* set control registers */
 463        reg_val = readl(mmio + CTRL1);
 464        reg_val &= ~XMTSP_MASK;
 465        writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
 466
 467        /* enable interrupt */
 468        writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
 469                APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
 470                SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
 471
 472        writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
 473
 474        /* initialize tx and rx ring base addresses */
 475        writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
 476        writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
 477
 478        writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
 479        writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
 480
 481        /* set default IPG to 96 */
 482        writew((u32)DEFAULT_IPG,mmio+IPG);
 483        writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
 484
 485        if(lp->options & OPTION_JUMBO_ENABLE){
 486                writel((u32)VAL2|JUMBO, mmio + CMD3);
 487                /* Reset REX_UFLO */
 488                writel( REX_UFLO, mmio + CMD2);
 489                /* Should not set REX_UFLO for jumbo frames */
 490                writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
 491        }else{
 492                writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
 493                writel((u32)JUMBO, mmio + CMD3);
 494        }
 495
 496#if AMD8111E_VLAN_TAG_USED
 497        writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
 498#endif
 499        writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
 500
 501        /* Setting the MAC address to the device */
 502        for(i = 0; i < ETH_ADDR_LEN; i++)
 503                writeb( dev->dev_addr[i], mmio + PADR + i );
 504
 505        /* Enable interrupt coalesce */
 506        if(lp->options & OPTION_INTR_COAL_ENABLE){
 507                printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
 508                                                                dev->name);
 509                amd8111e_set_coalesce(dev,ENABLE_COAL);
 510        }
 511
 512        /* set RUN bit to start the chip */
 513        writel(VAL2 | RDMD0, mmio + CMD0);
 514        writel(VAL0 | INTREN | RUN, mmio + CMD0);
 515
 516        /* To avoid PCI posting bug */
 517        readl(mmio+CMD0);
 518        return 0;
 519}
 520/*
 521This function clears the necessary device registers.
 522*/
 523static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
 524{
 525        unsigned int reg_val;
 526        unsigned int logic_filter[2] ={0,};
 527        void __iomem *mmio = lp->mmio;
 528
 529
 530        /* stop the chip */
 531        writel(RUN, mmio + CMD0);
 532
 533        /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
 534        writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
 535
 536        /* Clear RCV_RING_BASE_ADDR */
 537        writel(0, mmio + RCV_RING_BASE_ADDR0);
 538
 539        /* Clear XMT_RING_BASE_ADDR */
 540        writel(0, mmio + XMT_RING_BASE_ADDR0);
 541        writel(0, mmio + XMT_RING_BASE_ADDR1);
 542        writel(0, mmio + XMT_RING_BASE_ADDR2);
 543        writel(0, mmio + XMT_RING_BASE_ADDR3);
 544
 545        /* Clear CMD0  */
 546        writel(CMD0_CLEAR,mmio + CMD0);
 547
 548        /* Clear CMD2 */
 549        writel(CMD2_CLEAR, mmio +CMD2);
 550
 551        /* Clear CMD7 */
 552        writel(CMD7_CLEAR , mmio + CMD7);
 553
 554        /* Clear DLY_INT_A and DLY_INT_B */
 555        writel(0x0, mmio + DLY_INT_A);
 556        writel(0x0, mmio + DLY_INT_B);
 557
 558        /* Clear FLOW_CONTROL */
 559        writel(0x0, mmio + FLOW_CONTROL);
 560
 561        /* Clear INT0  write 1 to clear register */
 562        reg_val = readl(mmio + INT0);
 563        writel(reg_val, mmio + INT0);
 564
 565        /* Clear STVAL */
 566        writel(0x0, mmio + STVAL);
 567
 568        /* Clear INTEN0 */
 569        writel( INTEN0_CLEAR, mmio + INTEN0);
 570
 571        /* Clear LADRF */
 572        writel(0x0 , mmio + LADRF);
 573
 574        /* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
 575        writel( 0x80010,mmio + SRAM_SIZE);
 576
 577        /* Clear RCV_RING0_LEN */
 578        writel(0x0, mmio +  RCV_RING_LEN0);
 579
 580        /* Clear XMT_RING0/1/2/3_LEN */
 581        writel(0x0, mmio +  XMT_RING_LEN0);
 582        writel(0x0, mmio +  XMT_RING_LEN1);
 583        writel(0x0, mmio +  XMT_RING_LEN2);
 584        writel(0x0, mmio +  XMT_RING_LEN3);
 585
 586        /* Clear XMT_RING_LIMIT */
 587        writel(0x0, mmio + XMT_RING_LIMIT);
 588
 589        /* Clear MIB */
 590        writew(MIB_CLEAR, mmio + MIB_ADDR);
 591
 592        /* Clear LADRF */
 593        amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
 594
 595        /* SRAM_SIZE register */
 596        reg_val = readl(mmio + SRAM_SIZE);
 597
 598        if(lp->options & OPTION_JUMBO_ENABLE)
 599                writel( VAL2|JUMBO, mmio + CMD3);
 600#if AMD8111E_VLAN_TAG_USED
 601        writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
 602#endif
 603        /* Set default value to CTRL1 Register */
 604        writel(CTRL1_DEFAULT, mmio + CTRL1);
 605
 606        /* To avoid PCI posting bug */
 607        readl(mmio + CMD2);
 608
 609}
 610
 611/*
 612This function disables the interrupt and clears all the pending
 613interrupts in INT0
 614 */
 615static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
 616{
 617        u32 intr0;
 618
 619        /* Disable interrupt */
 620        writel(INTREN, lp->mmio + CMD0);
 621
 622        /* Clear INT0 */
 623        intr0 = readl(lp->mmio + INT0);
 624        writel(intr0, lp->mmio + INT0);
 625
 626        /* To avoid PCI posting bug */
 627        readl(lp->mmio + INT0);
 628
 629}
 630
 631/*
 632This function stops the chip.
 633*/
 634static void amd8111e_stop_chip(struct amd8111e_priv* lp)
 635{
 636        writel(RUN, lp->mmio + CMD0);
 637
 638        /* To avoid PCI posting bug */
 639        readl(lp->mmio + CMD0);
 640}
 641
 642/*
 643This function frees the transmit and receive descriptor rings.
 644*/
 645static void amd8111e_free_ring(struct amd8111e_priv* lp)
 646{
 647        /* Free transmit and receive descriptor rings */
 648        if(lp->rx_ring){
 649                pci_free_consistent(lp->pci_dev,
 650                        sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
 651                        lp->rx_ring, lp->rx_ring_dma_addr);
 652                lp->rx_ring = NULL;
 653        }
 654
 655        if(lp->tx_ring){
 656                pci_free_consistent(lp->pci_dev,
 657                        sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
 658                        lp->tx_ring, lp->tx_ring_dma_addr);
 659
 660                lp->tx_ring = NULL;
 661        }
 662
 663}
 664#if AMD8111E_VLAN_TAG_USED
 665/*
 666This is the receive indication function for packets with vlan tag.
 667*/
 668static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
 669{
 670        return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
 671}
 672#endif
 673
 674/*
 675This function will free all the transmit skbs that have actually been transmitted by the device. It checks the ownership bit of each descriptor before freeing the skb.
 676*/
 677static int amd8111e_tx(struct net_device *dev)
 678{
 679        struct amd8111e_priv* lp = netdev_priv(dev);
 680        int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
 681        int status;
 682        /* Complete all the transmit packets */
 683        while (lp->tx_complete_idx != lp->tx_idx){
 684                tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
 685                status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
 686
 687                if(status & OWN_BIT)
 688                        break;  /* It still hasn't been Txed */
 689
 690                lp->tx_ring[tx_index].buff_phy_addr = 0;
 691
 692                /* We must free the original skb */
 693                if (lp->tx_skbuff[tx_index]) {
 694                        pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
 695                                        lp->tx_skbuff[tx_index]->len,
 696                                        PCI_DMA_TODEVICE);
 697                        dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
 698                        lp->tx_skbuff[tx_index] = NULL;
 699                        lp->tx_dma_addr[tx_index] = 0;
 700                }
 701                lp->tx_complete_idx++;
 702                /*COAL update tx coalescing parameters */
 703                lp->coal_conf.tx_packets++;
 704                lp->coal_conf.tx_bytes +=
 705                        le16_to_cpu(lp->tx_ring[tx_index].buff_count);
 706
 707                if (netif_queue_stopped(dev) &&
 708                        lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
 709                        /* The ring is no longer full, clear tbusy. */
 710                        /* lp->tx_full = 0; */
 711                        netif_wake_queue (dev);
 712                }
 713        }
 714        return 0;
 715}
 716
 717/* This function handles the driver receive operation in polling mode */
 718static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 719{
 720        struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
 721        struct net_device *dev = lp->amd8111e_net_dev;
 722        int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
 723        void __iomem *mmio = lp->mmio;
 724        struct sk_buff *skb,*new_skb;
 725        int min_pkt_len, status;
 726        unsigned int intr0;
 727        int num_rx_pkt = 0;
 728        short pkt_len;
 729#if AMD8111E_VLAN_TAG_USED
 730        short vtag;
 731#endif
 732        int rx_pkt_limit = budget;
 733        unsigned long flags;
 734
 735        do{
 736                /* process receive packets until we use the quota*/
 737                /* If we own the next entry, it's a new packet. Send it up. */
 738                while(1) {
 739                        status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
 740                        if (status & OWN_BIT)
 741                                break;
 742
 743                        /*
 744                         * There is a tricky error noted by John Murphy,
 745                         * <murf@perftech.com> to Russ Nelson: Even with
 746                         * full-sized buffers it's possible for a
 747                         * jabber packet to use two buffers, with only
 748                         * the last correctly noting the error.
 749                         */
 750
 751                        if(status & ERR_BIT) {
 752                                /* resetting flags */
 753                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 754                                goto err_next_pkt;
 755                        }
 756                        /* check for STP and ENP */
 757                        if(!((status & STP_BIT) && (status & ENP_BIT))){
 758                                /* resetting flags */
 759                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 760                                goto err_next_pkt;
 761                        }
 762                        pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 763
 764#if AMD8111E_VLAN_TAG_USED
 765                        vtag = status & TT_MASK;
 766                        /*MAC will strip vlan tag*/
 767                        if(lp->vlgrp != NULL && vtag !=0)
 768                                min_pkt_len =MIN_PKT_LEN - 4;
 769                        else
 770#endif
 771                                min_pkt_len =MIN_PKT_LEN;
 772
 773                        if (pkt_len < min_pkt_len) {
 774                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 775                                lp->drv_rx_errors++;
 776                                goto err_next_pkt;
 777                        }
 778                        if(--rx_pkt_limit < 0)
 779                                goto rx_not_empty;
 780                        if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
 781                                /* if allocation fails,
 782                                   skip this packet and go to the next one */
 783                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 784                                lp->drv_rx_errors++;
 785                                goto err_next_pkt;
 786                        }
 787
 788                        skb_reserve(new_skb, 2);
 789                        skb = lp->rx_skbuff[rx_index];
 790                        pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
 791                                         lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
 792                        skb_put(skb, pkt_len);
 793                        lp->rx_skbuff[rx_index] = new_skb;
 794                        lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
 795                                                                   new_skb->data,
 796                                                                   lp->rx_buff_len-2,
 797                                                                   PCI_DMA_FROMDEVICE);
 798
 799                        skb->protocol = eth_type_trans(skb, dev);
 800
 801#if AMD8111E_VLAN_TAG_USED
 802                        if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
 803                                amd8111e_vlan_rx(lp, skb,
 804                                         le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
 805                        } else
 806#endif
 807                                netif_receive_skb(skb);
 808                        /*COAL update rx coalescing parameters*/
 809                        lp->coal_conf.rx_packets++;
 810                        lp->coal_conf.rx_bytes += pkt_len;
 811                        num_rx_pkt++;
 812
 813                err_next_pkt:
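                        /* Recycle the descriptor: restore the DMA address and
                           buffer length, then hand ownership back to the MAC
                           so this slot can be reused. */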
 814                        lp->rx_ring[rx_index].buff_phy_addr
 815                                = cpu_to_le32(lp->rx_dma_addr[rx_index]);
 816                        lp->rx_ring[rx_index].buff_count =
 817                                cpu_to_le16(lp->rx_buff_len-2);
 818                        wmb();
 819                        lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
 820                        rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
 821                }
 822                /* Check the interrupt status register for more packets that
 823                   arrived in the meantime. Process them since we have not used up our quota. */
 824
 825                intr0 = readl(mmio + INT0);
 826                /*Ack receive packets */
 827                writel(intr0 & RINT0,mmio + INT0);
 828
 829        } while(intr0 & RINT0);
 830
 831        if (rx_pkt_limit > 0) {
 832                /* Receive descriptor is empty now */
 833                spin_lock_irqsave(&lp->lock, flags);
 834                __napi_complete(napi);
 835                writel(VAL0|RINTEN0, mmio + INTEN0);
 836                writel(VAL2 | RDMD0, mmio + CMD0);
 837                spin_unlock_irqrestore(&lp->lock, flags);
 838        }
 839
 840rx_not_empty:
 841        return num_rx_pkt;
 842}
 843
 844/*
 845This function will indicate the link status to the kernel.
 846*/
 847static int amd8111e_link_change(struct net_device* dev)
 848{
 849        struct amd8111e_priv *lp = netdev_priv(dev);
 850        int status0,speed;
 851
 852        /* read the link change */
 853        status0 = readl(lp->mmio + STAT0);
 854
 855        if(status0 & LINK_STATS){
 856                if(status0 & AUTONEG_COMPLETE)
 857                        lp->link_config.autoneg = AUTONEG_ENABLE;
 858                else
 859                        lp->link_config.autoneg = AUTONEG_DISABLE;
 860
 861                if(status0 & FULL_DPLX)
 862                        lp->link_config.duplex = DUPLEX_FULL;
 863                else
 864                        lp->link_config.duplex = DUPLEX_HALF;
 865                speed = (status0 & SPEED_MASK) >> 7;
 866                if(speed == PHY_SPEED_10)
 867                        lp->link_config.speed = SPEED_10;
 868                else if(speed == PHY_SPEED_100)
 869                        lp->link_config.speed = SPEED_100;
 870
 871                printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
 872                       (lp->link_config.speed == SPEED_100) ? "100": "10",
 873                       (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
 874                netif_carrier_on(dev);
 875        }
 876        else{
 877                lp->link_config.speed = SPEED_INVALID;
 878                lp->link_config.duplex = DUPLEX_INVALID;
 879                lp->link_config.autoneg = AUTONEG_INVALID;
 880                printk(KERN_INFO "%s: Link is Down.\n",dev->name);
 881                netif_carrier_off(dev);
 882        }
 883
 884        return 0;
 885}
 886/*
 887This function reads the mib counters.
 888*/
 889static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
 890{
 891        unsigned int  status;
 892        unsigned  int data;
 893        unsigned int repeat = REPEAT_CNT;
 894
 895        writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
 896        do {
 897                status = readw(mmio + MIB_ADDR);
 898                udelay(2);      /* controller takes MAX 2 us to get mib data */
 899        }
 900        while (--repeat && (status & MIB_CMD_ACTIVE));
 901
 902        data = readl(mmio + MIB_DATA);
 903        return data;
 904}
 905
 906/*
 907This function reads the mib registers and returns the hardware statistics. It updates the internal driver statistics with the values read from the hardware.
 908*/
 909static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
 910{
 911        struct amd8111e_priv *lp = netdev_priv(dev);
 912        void __iomem *mmio = lp->mmio;
 913        unsigned long flags;
 914        /* struct net_device_stats *prev_stats = &lp->prev_stats; */
 915        struct net_device_stats* new_stats = &lp->stats;
 916
 917        if(!lp->opened)
 918                return &lp->stats;
 919        spin_lock_irqsave (&lp->lock, flags);
 920
 921        /* stats.rx_packets */
 922        new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
 923                                amd8111e_read_mib(mmio, rcv_multicast_pkts)+
 924                                amd8111e_read_mib(mmio, rcv_unicast_pkts);
 925
 926        /* stats.tx_packets */
 927        new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
 928
 929        /*stats.rx_bytes */
 930        new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
 931
 932        /* stats.tx_bytes */
 933        new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
 934
 935        /* stats.rx_errors */
 936        /* hw errors + errors driver reported */
 937        new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
 938                                amd8111e_read_mib(mmio, rcv_fragments)+
 939                                amd8111e_read_mib(mmio, rcv_jabbers)+
 940                                amd8111e_read_mib(mmio, rcv_alignment_errors)+
 941                                amd8111e_read_mib(mmio, rcv_fcs_errors)+
 942                                amd8111e_read_mib(mmio, rcv_miss_pkts)+
 943                                lp->drv_rx_errors;
 944
 945        /* stats.tx_errors */
 946        new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
 947
 948        /* stats.rx_dropped*/
 949        new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
 950
 951        /* stats.tx_dropped*/
 952        new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
 953
 954        /* stats.multicast*/
 955        new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
 956
 957        /* stats.collisions*/
 958        new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
 959
 960        /* stats.rx_length_errors*/
 961        new_stats->rx_length_errors =
 962                amd8111e_read_mib(mmio, rcv_undersize_pkts)+
 963                amd8111e_read_mib(mmio, rcv_oversize_pkts);
 964
 965        /* stats.rx_over_errors*/
 966        new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 967
 968        /* stats.rx_crc_errors*/
 969        new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
 970
 971        /* stats.rx_frame_errors*/
 972        new_stats->rx_frame_errors =
 973                amd8111e_read_mib(mmio, rcv_alignment_errors);
 974
 975        /* stats.rx_fifo_errors */
 976        new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 977
 978        /* stats.rx_missed_errors */
 979        new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 980
 981        /* stats.tx_aborted_errors*/
 982        new_stats->tx_aborted_errors =
 983                amd8111e_read_mib(mmio, xmt_excessive_collision);
 984
 985        /* stats.tx_carrier_errors*/
 986        new_stats->tx_carrier_errors =
 987                amd8111e_read_mib(mmio, xmt_loss_carrier);
 988
 989        /* stats.tx_fifo_errors*/
 990        new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
 991
 992        /* stats.tx_window_errors*/
 993        new_stats->tx_window_errors =
 994                amd8111e_read_mib(mmio, xmt_late_collision);
 995
 996        /* Reset the mibs for collecting new statistics */
 997        /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
 998
 999        spin_unlock_irqrestore (&lp->lock, flags);
1000
1001        return new_stats;
1002}
1003/* This function recalculates the interrupt coalescing mode on every interrupt
1004according to the data rate and the packet rate.
1005*/
1006static int amd8111e_calc_coalesce(struct net_device *dev)
1007{
1008        struct amd8111e_priv *lp = netdev_priv(dev);
1009        struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1010        int tx_pkt_rate;
1011        int rx_pkt_rate;
1012        int tx_data_rate;
1013        int rx_data_rate;
1014        int rx_pkt_size;
1015        int tx_pkt_size;
1016
1017        tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1018        coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1019
1020        tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1021        coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1022
1023        rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1024        coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1025
1026        rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1027        coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
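        /* Pick a coalescing tier from the measured packet rate and average
           packet size: low packet rates disable coalescing, while larger
           average packets tolerate longer timeouts and higher event counts
           before an interrupt is raised. */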
1028
1029        if(rx_pkt_rate < 800){
1030                if(coal_conf->rx_coal_type != NO_COALESCE){
1031
1032                        coal_conf->rx_timeout = 0x0;
1033                        coal_conf->rx_event_count = 0;
1034                        amd8111e_set_coalesce(dev,RX_INTR_COAL);
1035                        coal_conf->rx_coal_type = NO_COALESCE;
1036                }
1037        }
1038        else{
1039
1040                rx_pkt_size = rx_data_rate/rx_pkt_rate;
1041                if (rx_pkt_size < 128){
1042                        if(coal_conf->rx_coal_type != NO_COALESCE){
1043
1044                                coal_conf->rx_timeout = 0;
1045                                coal_conf->rx_event_count = 0;
1046                                amd8111e_set_coalesce(dev,RX_INTR_COAL);
1047                                coal_conf->rx_coal_type = NO_COALESCE;
1048                        }
1049
1050                }
1051                else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1052
1053                        if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1054                                coal_conf->rx_timeout = 1;
1055                                coal_conf->rx_event_count = 4;
1056                                amd8111e_set_coalesce(dev,RX_INTR_COAL);
1057                                coal_conf->rx_coal_type = LOW_COALESCE;
1058                        }
1059                }
1060                else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1061
1062                        if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1063                                coal_conf->rx_timeout = 1;
1064                                coal_conf->rx_event_count = 4;
1065                                amd8111e_set_coalesce(dev,RX_INTR_COAL);
1066                                coal_conf->rx_coal_type = MEDIUM_COALESCE;
1067                        }
1068
1069                }
1070                else if(rx_pkt_size >= 1024){
1071                        if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1072                                coal_conf->rx_timeout = 2;
1073                                coal_conf->rx_event_count = 3;
1074                                amd8111e_set_coalesce(dev,RX_INTR_COAL);
1075                                coal_conf->rx_coal_type = HIGH_COALESCE;
1076                        }
1077                }
1078        }
1079        /* NOW FOR TX INTR COALESCING */
1080        if(tx_pkt_rate < 800){
1081                if(coal_conf->tx_coal_type != NO_COALESCE){
1082
1083                        coal_conf->tx_timeout = 0x0;
1084                        coal_conf->tx_event_count = 0;
1085                        amd8111e_set_coalesce(dev,TX_INTR_COAL);
1086                        coal_conf->tx_coal_type = NO_COALESCE;
1087                }
1088        }
1089        else{
1090
1091                tx_pkt_size = tx_data_rate/tx_pkt_rate;
1092                if (tx_pkt_size < 128){
1093
1094                        if(coal_conf->tx_coal_type != NO_COALESCE){
1095
1096                                coal_conf->tx_timeout = 0;
1097                                coal_conf->tx_event_count = 0;
1098                                amd8111e_set_coalesce(dev,TX_INTR_COAL);
1099                                coal_conf->tx_coal_type = NO_COALESCE;
1100                        }
1101
1102                }
1103                else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1104
1105                        if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1106                                coal_conf->tx_timeout = 1;
1107                                coal_conf->tx_event_count = 2;
1108                                amd8111e_set_coalesce(dev,TX_INTR_COAL);
1109                                coal_conf->tx_coal_type = LOW_COALESCE;
1110
1111                        }
1112                }
1113                else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1114
1115                        if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1116                                coal_conf->tx_timeout = 2;
1117                                coal_conf->tx_event_count = 5;
1118                                amd8111e_set_coalesce(dev,TX_INTR_COAL);
1119                                coal_conf->tx_coal_type = MEDIUM_COALESCE;
1120                        }
1121
1122                }
1123                else if(tx_pkt_size >= 1024){
1124                        if (tx_pkt_size >= 1024){
1125                                if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1126                                        coal_conf->tx_timeout = 4;
1127                                        coal_conf->tx_event_count = 8;
1128                                        amd8111e_set_coalesce(dev,TX_INTR_COAL);
1129                                        coal_conf->tx_coal_type = HIGH_COALESCE;
1130                                }
1131                        }
1132                }
1133        }
1134        return 0;
1135
1136}
1137/*
1138This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
1139*/
1140static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1141{
1142
1143        struct net_device * dev = (struct net_device *) dev_id;
1144        struct amd8111e_priv *lp = netdev_priv(dev);
1145        void __iomem *mmio = lp->mmio;
1146        unsigned int intr0, intren0;
1147        unsigned int handled = 1;
1148
1149        if(unlikely(dev == NULL))
1150                return IRQ_NONE;
1151
1152        spin_lock(&lp->lock);
1153
1154        /* disabling interrupt */
1155        writel(INTREN, mmio + CMD0);
1156
1157        /* Read interrupt status */
1158        intr0 = readl(mmio + INT0);
1159        intren0 = readl(mmio + INTEN0);
1160
1161        /* Process all the INT event until INTR bit is clear. */
1162
1163        if (!(intr0 & INTR)){
1164                handled = 0;
1165                goto err_no_interrupt;
1166        }
1167
1168        /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1169        writel(intr0, mmio + INT0);
1170
1171        /* Check if Receive Interrupt has occurred. */
1172        if (intr0 & RINT0) {
1173                if (napi_schedule_prep(&lp->napi)) {
1174                        /* Disable receive interrupts */
1175                        writel(RINTEN0, mmio + INTEN0);
1176                        /* Schedule a polling routine */
1177                        __napi_schedule(&lp->napi);
1178                } else if (intren0 & RINTEN0) {
1179                        printk("************Driver bug! "
1180                                "interrupt while in poll\n");
1181                        /* Fix by disable receive interrupts */
1182                        writel(RINTEN0, mmio + INTEN0);
1183                }
1184        }
1185
1186        /* Check if  Transmit Interrupt has occurred. */
1187        if (intr0 & TINT0)
1188                amd8111e_tx(dev);
1189
1190        /* Check if  Link Change Interrupt has occurred. */
1191        if (intr0 & LCINT)
1192                amd8111e_link_change(dev);
1193
1194        /* Check if Hardware Timer Interrupt has occurred. */
1195        if (intr0 & STINT)
1196                amd8111e_calc_coalesce(dev);
1197
1198err_no_interrupt:
1199        writel( VAL0 | INTREN,mmio + CMD0);
1200
1201        spin_unlock(&lp->lock);
1202
1203        return IRQ_RETVAL(handled);
1204}
1205
1206#ifdef CONFIG_NET_POLL_CONTROLLER
1207static void amd8111e_poll(struct net_device *dev)
1208{
1209        unsigned long flags;
1210        local_irq_save(flags);
1211        amd8111e_interrupt(0, dev);
1212        local_irq_restore(flags);
1213}
1214#endif
1215
1216
1217/*
1218This function closes the network interface and updates the statistics so that the most recent statistics are available after the interface is down.
1219*/
1220static int amd8111e_close(struct net_device * dev)
1221{
1222        struct amd8111e_priv *lp = netdev_priv(dev);
1223        netif_stop_queue(dev);
1224
1225        napi_disable(&lp->napi);
1226
1227        spin_lock_irq(&lp->lock);
1228
1229        amd8111e_disable_interrupt(lp);
1230        amd8111e_stop_chip(lp);
1231
1232        /* Free transmit and receive skbs */
1233        amd8111e_free_skbs(lp->amd8111e_net_dev);
1234
1235        netif_carrier_off(lp->amd8111e_net_dev);
1236
1237        /* Delete ipg timer */
1238        if(lp->options & OPTION_DYN_IPG_ENABLE)
1239                del_timer_sync(&lp->ipg_data.ipg_timer);
1240
1241        spin_unlock_irq(&lp->lock);
1242        free_irq(dev->irq, dev);
1243        amd8111e_free_ring(lp);
1244
1245        /* Update the statistics before closing */
1246        amd8111e_get_stats(dev);
1247        lp->opened = 0;
1248        return 0;
1249}
1250/* This function opens a new interface. It requests an IRQ for the device, initializes the device, buffers and descriptors, and starts the device.
1251*/
1252static int amd8111e_open(struct net_device * dev )
1253{
1254        struct amd8111e_priv *lp = netdev_priv(dev);
1255
1256        if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1257                                         dev->name, dev))
1258                return -EAGAIN;
1259
1260        napi_enable(&lp->napi);
1261
1262        spin_lock_irq(&lp->lock);
1263
1264        amd8111e_init_hw_default(lp);
1265
1266        if(amd8111e_restart(dev)){
1267                spin_unlock_irq(&lp->lock);
1268                napi_disable(&lp->napi);
1269                if (dev->irq)
1270                        free_irq(dev->irq, dev);
1271                return -ENOMEM;
1272        }
1273        /* Start ipg timer */
1274        if(lp->options & OPTION_DYN_IPG_ENABLE){
1275                add_timer(&lp->ipg_data.ipg_timer);
1276                printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1277        }
1278
1279        lp->opened = 1;
1280
1281        spin_unlock_irq(&lp->lock);
1282
1283        netif_start_queue(dev);
1284
1285        return 0;
1286}
1287/*
1288This function checks whether any transmit descriptors are available to queue more packets.
1289*/
1290static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1291{
1292        int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1293        if (lp->tx_skbuff[tx_index])
1294                return -1;
1295        else
1296                return 0;
1297
1298}
1299/*
1300This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, ownership bit, etc.
1301*/
1302
1303static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1304                                       struct net_device * dev)
1305{
1306        struct amd8111e_priv *lp = netdev_priv(dev);
1307        int tx_index;
1308        unsigned long flags;
1309
1310        spin_lock_irqsave(&lp->lock, flags);
1311
1312        tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1313
1314        lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1315
1316        lp->tx_skbuff[tx_index] = skb;
1317        lp->tx_ring[tx_index].tx_flags = 0;
1318
1319#if AMD8111E_VLAN_TAG_USED
1320        if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
1321                lp->tx_ring[tx_index].tag_ctrl_cmd |=
1322                                cpu_to_le16(TCC_VLAN_INSERT);
1323                lp->tx_ring[tx_index].tag_ctrl_info =
1324                                cpu_to_le16(vlan_tx_tag_get(skb));
1325
1326        }
1327#endif
1328        lp->tx_dma_addr[tx_index] =
1329            pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1330        lp->tx_ring[tx_index].buff_phy_addr =
1331            cpu_to_le32(lp->tx_dma_addr[tx_index]);
1332
1333        /*  Set FCS and LTINT bits */
1334        wmb();
1335        lp->tx_ring[tx_index].tx_flags |=
1336            cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1337
1338        lp->tx_idx++;
1339
1340        /* Trigger an immediate send poll. */
1341        writel( VAL1 | TDMD0, lp->mmio + CMD0);
1342        writel( VAL2 | RDMD0,lp->mmio + CMD0);
1343
1344        dev->trans_start = jiffies;
1345
1346        if(amd8111e_tx_queue_avail(lp) < 0){
1347                netif_stop_queue(dev);
1348        }
1349        spin_unlock_irqrestore(&lp->lock, flags);
1350        return NETDEV_TX_OK;
1351}
1352/*
1353This function reads the relevant memory mapped registers of the device into the supplied buffer.
1354*/
1355static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1356{
1357        void __iomem *mmio = lp->mmio;
1358        /* Read only necessary registers */
1359        buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1360        buf[1] = readl(mmio + XMT_RING_LEN0);
1361        buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1362        buf[3] = readl(mmio + RCV_RING_LEN0);
1363        buf[4] = readl(mmio + CMD0);
1364        buf[5] = readl(mmio + CMD2);
1365        buf[6] = readl(mmio + CMD3);
1366        buf[7] = readl(mmio + CMD7);
1367        buf[8] = readl(mmio + INT0);
1368        buf[9] = readl(mmio + INTEN0);
1369        buf[10] = readl(mmio + LADRF);
1370        buf[11] = readl(mmio + LADRF+4);
1371        buf[12] = readl(mmio + STAT0);
1372}
1373
1374
1375/*
1376This function sets promiscuous mode, all-multi mode or the multicast address
1377list to the device.
1378*/
1379static void amd8111e_set_multicast_list(struct net_device *dev)
1380{
1381        struct dev_mc_list* mc_ptr;
1382        struct amd8111e_priv *lp = netdev_priv(dev);
1383        u32 mc_filter[2] ;
1384        int i,bit_num;
1385        if(dev->flags & IFF_PROMISC){
1386                writel( VAL2 | PROM, lp->mmio + CMD2);
1387                return;
1388        }
1389        else
1390                writel( PROM, lp->mmio + CMD2);
1391        if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
1392                /* get all multicast packet */
1393                mc_filter[1] = mc_filter[0] = 0xffffffff;
1394                lp->mc_list = dev->mc_list;
1395                lp->options |= OPTION_MULTICAST_ENABLE;
1396                amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1397                return;
1398        }
1399        if( dev->mc_count == 0 ){
1400                /* get only own packets */
1401                mc_filter[1] = mc_filter[0] = 0;
1402                lp->mc_list = NULL;
1403                lp->options &= ~OPTION_MULTICAST_ENABLE;
1404                amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1405                /* disable promiscuous mode */
1406                writel(PROM, lp->mmio + CMD2);
1407                return;
1408        }
1409        /* load all the multicast addresses in the logic filter */
1410        lp->options |= OPTION_MULTICAST_ENABLE;
1411        lp->mc_list = dev->mc_list;
1412        mc_filter[1] = mc_filter[0] = 0;
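        /* Hash each multicast address with CRC-32 and use the top six bits to
           select one of the 64 bits in the logical address filter (LADRF). */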
1413        for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
1414                     i++, mc_ptr = mc_ptr->next) {
1415                bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
1416                mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1417        }
1418        amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1419
1420        /* To eliminate PCI posting bug */
1421        readl(lp->mmio + CMD2);
1422
1423}
1424
1425static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1426{
1427        struct amd8111e_priv *lp = netdev_priv(dev);
1428        struct pci_dev *pci_dev = lp->pci_dev;
1429        strcpy (info->driver, MODULE_NAME);
1430        strcpy (info->version, MODULE_VERS);
1431        sprintf(info->fw_version,"%u",chip_version);
1432        strcpy (info->bus_info, pci_name(pci_dev));
1433}
1434
1435static int amd8111e_get_regs_len(struct net_device *dev)
1436{
1437        return AMD8111E_REG_DUMP_LEN;
1438}
1439
1440static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1441{
1442        struct amd8111e_priv *lp = netdev_priv(dev);
1443        regs->version = 0;
1444        amd8111e_read_regs(lp, buf);
1445}
1446
1447static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1448{
1449        struct amd8111e_priv *lp = netdev_priv(dev);
1450        spin_lock_irq(&lp->lock);
1451        mii_ethtool_gset(&lp->mii_if, ecmd);
1452        spin_unlock_irq(&lp->lock);
1453        return 0;
1454}
1455
1456static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1457{
1458        struct amd8111e_priv *lp = netdev_priv(dev);
1459        int res;
1460        spin_lock_irq(&lp->lock);
1461        res = mii_ethtool_sset(&lp->mii_if, ecmd);
1462        spin_unlock_irq(&lp->lock);
1463        return res;
1464}
1465
1466static int amd8111e_nway_reset(struct net_device *dev)
1467{
1468        struct amd8111e_priv *lp = netdev_priv(dev);
1469        return mii_nway_restart(&lp->mii_if);
1470}
1471
1472static u32 amd8111e_get_link(struct net_device *dev)
1473{
1474        struct amd8111e_priv *lp = netdev_priv(dev);
1475        return mii_link_ok(&lp->mii_if);
1476}
1477
1478static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1479{
1480        struct amd8111e_priv *lp = netdev_priv(dev);
1481        wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1482        if (lp->options & OPTION_WOL_ENABLE)
1483                wol_info->wolopts = WAKE_MAGIC;
1484}
1485
1486static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1487{
1488        struct amd8111e_priv *lp = netdev_priv(dev);
1489        if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1490                return -EINVAL;
1491        spin_lock_irq(&lp->lock);
1492        if (wol_info->wolopts & WAKE_MAGIC)
1493                lp->options |=
1494                        (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1495        else if(wol_info->wolopts & WAKE_PHY)
1496                lp->options |=
1497                        (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1498        else
1499                lp->options &= ~OPTION_WOL_ENABLE;
1500        spin_unlock_irq(&lp->lock);
1501        return 0;
1502}
1503
1504static const struct ethtool_ops ops = {
1505        .get_drvinfo = amd8111e_get_drvinfo,
1506        .get_regs_len = amd8111e_get_regs_len,
1507        .get_regs = amd8111e_get_regs,
1508        .get_settings = amd8111e_get_settings,
1509        .set_settings = amd8111e_set_settings,
1510        .nway_reset = amd8111e_nway_reset,
1511        .get_link = amd8111e_get_link,
1512        .get_wol = amd8111e_get_wol,
1513        .set_wol = amd8111e_set_wol,
1514};
1515
1516/*
1517This function handles the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG), letting userspace read and write PHY registers; any other request returns -EOPNOTSUPP.
1518*/
1519
1520static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1521{
1522        struct mii_ioctl_data *data = if_mii(ifr);
1523        struct amd8111e_priv *lp = netdev_priv(dev);
1524        int err;
1525        u32 mii_regval;
1526
1527        switch(cmd) {
1528        case SIOCGMIIPHY:
1529                data->phy_id = lp->ext_phy_addr;
1530
1531        /* fallthru */
1532        case SIOCGMIIREG:
1533
1534                spin_lock_irq(&lp->lock);
1535                err = amd8111e_read_phy(lp, data->phy_id,
1536                        data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1537                spin_unlock_irq(&lp->lock);
1538
1539                data->val_out = mii_regval;
1540                return err;
1541
1542        case SIOCSMIIREG:
1543
1544                spin_lock_irq(&lp->lock);
1545                err = amd8111e_write_phy(lp, data->phy_id,
1546                        data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1547                spin_unlock_irq(&lp->lock);
1548
1549                return err;
1550
1551        default:
1552                /* do nothing */
1553                break;
1554        }
1555        return -EOPNOTSUPP;
1556}
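
/*
This function sets a new station (MAC) address: it copies the address into
the net_device and programs the PADR registers of the controller under the
driver lock.
*/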
1557static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1558{
1559        struct amd8111e_priv *lp = netdev_priv(dev);
1560        int i;
1561        struct sockaddr *addr = p;
1562
1563        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1564        spin_lock_irq(&lp->lock);
1565        /* Setting the MAC address to the device */
1566        for(i = 0; i < ETH_ADDR_LEN; i++)
1567                writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1568
1569        spin_unlock_irq(&lp->lock);
1570
1571        return 0;
1572}
1573
1574/*
1575This function changes the MTU of the device. If the interface is running, it restarts the device so the receive descriptors are initialized with buffers of the new size.
1576*/
1577static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1578{
1579        struct amd8111e_priv *lp = netdev_priv(dev);
1580        int err;
1581
1582        if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1583                return -EINVAL;
1584
1585        if (!netif_running(dev)) {
1586                /* new_mtu will be used
1587                   when the device is started next time */
1588                dev->mtu = new_mtu;
1589                return 0;
1590        }
1591
1592        spin_lock_irq(&lp->lock);
1593
1594        /* stop the chip */
1595        writel(RUN, lp->mmio + CMD0);
1596
1597        dev->mtu = new_mtu;
1598
1599        err = amd8111e_restart(dev);
1600        spin_unlock_irq(&lp->lock);
1601        if(!err)
1602                netif_start_queue(dev);
1603        return err;
1604}
1605
1606#if AMD8111E_VLAN_TAG_USED
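/*
This function records the VLAN group registered by the 8021q layer; the
receive path uses it when delivering VLAN-tagged frames.
*/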
1607static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1608{
1609        struct  amd8111e_priv *lp = netdev_priv(dev);
1610        spin_lock_irq(&lp->lock);
1611        lp->vlgrp = grp;
1612        spin_unlock_irq(&lp->lock);
1613}
1614#endif
1615
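/*
This function arms Magic Packet wake-up by programming the CMD3 and CMD7
registers.
*/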
1616static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1617{
1618        writel( VAL1|MPPLBA, lp->mmio + CMD3);
1619        writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1620
1621        /* To eliminate PCI posting bug */
1622        readl(lp->mmio + CMD7);
1623        return 0;
1624}
1625
1626static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1627{
1628
1629        /* Adapter is already stopped/suspended/interrupt-disabled */
1630        writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1631
1632        /* To eliminate PCI posting bug */
1633        readl(lp->mmio + CMD7);
1634        return 0;
1635}
1636/* This function is called when a packet transmission fails to complete within a reasonable period, on the assumption that an interrupt has been lost or the interface is locked up. It reinitializes the hardware. */
1637
1638static void amd8111e_tx_timeout(struct net_device *dev)
1639{
1640        struct amd8111e_priv* lp = netdev_priv(dev);
1641        int err;
1642
1643        printk(KERN_ERR "%s: transmit timed out, resetting\n",
1644                                                      dev->name);
1645        spin_lock_irq(&lp->lock);
1646        err = amd8111e_restart(dev);
1647        spin_unlock_irq(&lp->lock);
1648        if(!err)
1649                netif_wake_queue(dev);
1650}
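
/*
This function handles suspend: it disables interrupts, detaches the
interface, stops the chip, arms Wake-on-LAN if it was requested through
ethtool and puts the device into D3hot.
*/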
1651static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1652{
1653        struct net_device *dev = pci_get_drvdata(pci_dev);
1654        struct amd8111e_priv *lp = netdev_priv(dev);
1655
1656        if (!netif_running(dev))
1657                return 0;
1658
1659        /* disable the interrupt */
1660        spin_lock_irq(&lp->lock);
1661        amd8111e_disable_interrupt(lp);
1662        spin_unlock_irq(&lp->lock);
1663
1664        netif_device_detach(dev);
1665
1666        /* stop chip */
1667        spin_lock_irq(&lp->lock);
1668        if(lp->options & OPTION_DYN_IPG_ENABLE)
1669                del_timer_sync(&lp->ipg_data.ipg_timer);
1670        amd8111e_stop_chip(lp);
1671        spin_unlock_irq(&lp->lock);
1672
1673        if(lp->options & OPTION_WOL_ENABLE){
1674                 /* enable wol */
1675                if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1676                        amd8111e_enable_magicpkt(lp);
1677                if(lp->options & OPTION_WAKE_PHY_ENABLE)
1678                        amd8111e_enable_link_change(lp);
1679
1680                pci_enable_wake(pci_dev, PCI_D3hot, 1);
1681                pci_enable_wake(pci_dev, PCI_D3cold, 1);
1682
1683        }
1684        else{
1685                pci_enable_wake(pci_dev, PCI_D3hot, 0);
1686                pci_enable_wake(pci_dev, PCI_D3cold, 0);
1687        }
1688
1689        pci_save_state(pci_dev);
1690        pci_set_power_state(pci_dev, PCI_D3hot);
1691
1692        return 0;
1693}
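
/*
This function handles resume: it brings the device back to D0, restores the
PCI state, re-attaches the interface, restarts the chip and re-arms the
dynamic IPG timer if that option is enabled.
*/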
1694static int amd8111e_resume(struct pci_dev *pci_dev)
1695{
1696        struct net_device *dev = pci_get_drvdata(pci_dev);
1697        struct amd8111e_priv *lp = netdev_priv(dev);
1698
1699        if (!netif_running(dev))
1700                return 0;
1701
1702        pci_set_power_state(pci_dev, PCI_D0);
1703        pci_restore_state(pci_dev);
1704
1705        pci_enable_wake(pci_dev, PCI_D3hot, 0);
1706        pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1707
1708        netif_device_attach(dev);
1709
1710        spin_lock_irq(&lp->lock);
1711        amd8111e_restart(dev);
1712        /* Restart ipg timer */
1713        if(lp->options & OPTION_DYN_IPG_ENABLE)
1714                mod_timer(&lp->ipg_data.ipg_timer,
1715                                jiffies + IPG_CONVERGE_JIFFIES);
1716        spin_unlock_irq(&lp->lock);
1717
1718        return 0;
1719}
1720
1721
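/*
This function handles device removal: it unregisters the net device, unmaps
the MMIO region and releases the PCI resources.
*/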
1722static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1723{
1724        struct net_device *dev = pci_get_drvdata(pdev);
1725        if (dev) {
1726                unregister_netdev(dev);
1727                iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1728                free_netdev(dev);
1729                pci_release_regions(pdev);
1730                pci_disable_device(pdev);
1731                pci_set_drvdata(pdev, NULL);
1732        }
1733}
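
/*
This function implements dynamic IPG (inter-packet gap) tuning and runs
periodically from a timer. On a full-duplex link the default IPG is kept.
On half duplex the code waits IPG_STABLE_TIME ticks in SSTATE, then enters
CSTATE, where it sweeps the IPG from MIN_IPG to MAX_IPG in IPG_STEP
increments, tracks the collision-count delta reported by the
xmt_collisions MIB counter, and finally programs the IPG value that
produced the fewest collisions.
*/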
1734static void amd8111e_config_ipg(struct net_device* dev)
1735{
1736        struct amd8111e_priv *lp = netdev_priv(dev);
1737        struct ipg_info* ipg_data = &lp->ipg_data;
1738        void __iomem *mmio = lp->mmio;
1739        unsigned int prev_col_cnt = ipg_data->col_cnt;
1740        unsigned int total_col_cnt;
1741        unsigned int tmp_ipg;
1742
1743        if(lp->link_config.duplex == DUPLEX_FULL){
1744                ipg_data->ipg = DEFAULT_IPG;
1745                return;
1746        }
1747
1748        if(ipg_data->ipg_state == SSTATE){
1749
1750                if(ipg_data->timer_tick == IPG_STABLE_TIME){
1751
1752                        ipg_data->timer_tick = 0;
1753                        ipg_data->ipg = MIN_IPG - IPG_STEP;
1754                        ipg_data->current_ipg = MIN_IPG;
1755                        ipg_data->diff_col_cnt = 0xFFFFFFFF;
1756                        ipg_data->ipg_state = CSTATE;
1757                }
1758                else
1759                        ipg_data->timer_tick++;
1760        }
1761
1762        if(ipg_data->ipg_state == CSTATE){
1763
1764                /* Get the current collision count */
1765
1766                total_col_cnt = ipg_data->col_cnt =
1767                                amd8111e_read_mib(mmio, xmt_collisions);
1768
1769                if ((total_col_cnt - prev_col_cnt) <
1770                                (ipg_data->diff_col_cnt)){
1771
1772                        ipg_data->diff_col_cnt =
1773                                total_col_cnt - prev_col_cnt ;
1774
1775                        ipg_data->ipg = ipg_data->current_ipg;
1776                }
1777
1778                ipg_data->current_ipg += IPG_STEP;
1779
1780                if (ipg_data->current_ipg <= MAX_IPG)
1781                        tmp_ipg = ipg_data->current_ipg;
1782                else{
1783                        tmp_ipg = ipg_data->ipg;
1784                        ipg_data->ipg_state = SSTATE;
1785                }
1786                writew((u16)tmp_ipg, mmio + IPG);
1787                writew((u16)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1788        }
1789        mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1791
1792}
1793
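/*
This function probes for an external PHY by scanning MII addresses 0x1e
down to 0. It records the ID and address of the first PHY that responds,
otherwise it falls back to address 1 with a zero ID.
*/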
1794static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
1795{
1796        struct amd8111e_priv *lp = netdev_priv(dev);
1797        int i;
1798
1799        for (i = 0x1e; i >= 0; i--) {
1800                u32 id1, id2;
1801
1802                if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1803                        continue;
1804                if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1805                        continue;
1806                lp->ext_phy_id = (id1 << 16) | id2;
1807                lp->ext_phy_addr = i;
1808                return;
1809        }
1810        lp->ext_phy_id = 0;
1811        lp->ext_phy_addr = 1;
1812}
1813
1814static const struct net_device_ops amd8111e_netdev_ops = {
1815        .ndo_open               = amd8111e_open,
1816        .ndo_stop               = amd8111e_close,
1817        .ndo_start_xmit         = amd8111e_start_xmit,
1818        .ndo_tx_timeout         = amd8111e_tx_timeout,
1819        .ndo_get_stats          = amd8111e_get_stats,
1820        .ndo_set_multicast_list = amd8111e_set_multicast_list,
1821        .ndo_validate_addr      = eth_validate_addr,
1822        .ndo_set_mac_address    = amd8111e_set_mac_address,
1823        .ndo_do_ioctl           = amd8111e_ioctl,
1824        .ndo_change_mtu         = amd8111e_change_mtu,
1825#if AMD8111E_VLAN_TAG_USED
1826        .ndo_vlan_rx_register   = amd8111e_vlan_rx_register,
1827#endif
1828#ifdef CONFIG_NET_POLL_CONTROLLER
1829        .ndo_poll_controller     = amd8111e_poll,
1830#endif
1831};
1832
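/*
This function probes one device: it enables the PCI device, claims and maps
BAR 0, checks for the power-management capability, sets the 32-bit DMA
mask, reads the MAC address from the PADR registers, applies the per-card
module parameters and registers the net device.
*/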
1833static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1834                                  const struct pci_device_id *ent)
1835{
1836        int err,i,pm_cap;
1837        unsigned long reg_addr,reg_len;
1838        struct amd8111e_priv* lp;
1839        struct net_device* dev;
1840
1841        err = pci_enable_device(pdev);
1842        if(err){
1843                printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1844                        "exiting.\n");
1845                return err;
1846        }
1847
1848        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1849                printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1850                       "exiting.\n");
1851                err = -ENODEV;
1852                goto err_disable_pdev;
1853        }
1854
1855        err = pci_request_regions(pdev, MODULE_NAME);
1856        if(err){
1857                printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1858                       "exiting.\n");
1859                goto err_disable_pdev;
1860        }
1861
1862        pci_set_master(pdev);
1863
1864        /* Find power-management capability. */
1865        if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1866                printk(KERN_ERR "amd8111e: No Power Management capability, "
1867                       "exiting.\n");
                err = -ENODEV;
1868                goto err_free_reg;
1869        }
1870
1871        /* Initialize DMA */
1872        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1873                printk(KERN_ERR "amd8111e: DMA not supported, "
1874                       "exiting.\n");
                err = -ENODEV;
1875                goto err_free_reg;
1876        }
1877
1878        reg_addr = pci_resource_start(pdev, 0);
1879        reg_len = pci_resource_len(pdev, 0);
1880
1881        dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1882        if (!dev) {
1883                printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1884                err = -ENOMEM;
1885                goto err_free_reg;
1886        }
1887
1888        SET_NETDEV_DEV(dev, &pdev->dev);
1889
1890#if AMD8111E_VLAN_TAG_USED
1891        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1892#endif
1893
1894        lp = netdev_priv(dev);
1895        lp->pci_dev = pdev;
1896        lp->amd8111e_net_dev = dev;
1897        lp->pm_cap = pm_cap;
1898
1899        spin_lock_init(&lp->lock);
1900
1901        lp->mmio = ioremap(reg_addr, reg_len);
1902        if (!lp->mmio) {
1903                printk(KERN_ERR "amd8111e: Cannot map device registers, "
1904                       "exiting\n");
1905                err = -ENOMEM;
1906                goto err_free_dev;
1907        }
1908
1909        /* Read the MAC address from the PADR registers */
1910        for(i = 0; i < ETH_ADDR_LEN; i++)
1911                dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1912
1913        /* Apply the user-defined module parameters */
1914        lp->ext_phy_option = speed_duplex[card_idx];
1915        if(coalesce[card_idx])
1916                lp->options |= OPTION_INTR_COAL_ENABLE;
1917        if(dynamic_ipg[card_idx++])
1918                lp->options |= OPTION_DYN_IPG_ENABLE;
1919
1920
1921        /* Initialize driver entry points */
1922        dev->netdev_ops = &amd8111e_netdev_ops;
1923        SET_ETHTOOL_OPS(dev, &ops);
1924        dev->irq = pdev->irq;
1925        dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1926        netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1927
1931        /* Probe the external PHY */
1932        amd8111e_probe_ext_phy(dev);
1933
1934        /* setting mii default values */
1935        lp->mii_if.dev = dev;
1936        lp->mii_if.mdio_read = amd8111e_mdio_read;
1937        lp->mii_if.mdio_write = amd8111e_mdio_write;
1938        lp->mii_if.phy_id = lp->ext_phy_addr;
1939
1940        /* Set receive buffer length and set jumbo option */
1941        amd8111e_set_rx_buff_len(dev);
1942
1943
1944        err = register_netdev(dev);
1945        if (err) {
1946                printk(KERN_ERR "amd8111e: Cannot register net device, "
1947                       "exiting.\n");
1948                goto err_iounmap;
1949        }
1950
1951        pci_set_drvdata(pdev, dev);
1952
1953        /* Initialize software ipg timer */
1954        if(lp->options & OPTION_DYN_IPG_ENABLE){
1955                init_timer(&lp->ipg_data.ipg_timer);
1956                lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1957                lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1958                lp->ipg_data.ipg_timer.expires = jiffies +
1959                                                 IPG_CONVERGE_JIFFIES;
1960                lp->ipg_data.ipg = DEFAULT_IPG;
1961                lp->ipg_data.ipg_state = CSTATE;
1962        }
1963
1964        /* Display driver and device information */
1965
1966        chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1967        printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1968               dev->name,MODULE_VERS);
1969        printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1970               dev->name, chip_version, dev->dev_addr);
1971        if (lp->ext_phy_id)
1972                printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1973                       dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1974        else
1975                printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1976                       dev->name);
1977        return 0;
1978err_iounmap:
1979        iounmap(lp->mmio);
1980
1981err_free_dev:
1982        free_netdev(dev);
1983
1984err_free_reg:
1985        pci_release_regions(pdev);
1986
1987err_disable_pdev:
1988        pci_disable_device(pdev);
1989        pci_set_drvdata(pdev, NULL);
1990        return err;
1991
1992}
1993
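/* PCI driver description: device table, probe/remove and suspend/resume
   entry points. */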
1994static struct pci_driver amd8111e_driver = {
1995        .name           = MODULE_NAME,
1996        .id_table       = amd8111e_pci_tbl,
1997        .probe          = amd8111e_probe_one,
1998        .remove         = __devexit_p(amd8111e_remove_one),
1999        .suspend        = amd8111e_suspend,
2000        .resume         = amd8111e_resume
2001};
2002
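/* Module entry points: register and unregister the PCI driver with the
   PCI core. */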
2003static int __init amd8111e_init(void)
2004{
2005        return pci_register_driver(&amd8111e_driver);
2006}
2007
2008static void __exit amd8111e_cleanup(void)
2009{
2010        pci_unregister_driver(&amd8111e_driver);
2011}
2012
2013module_init(amd8111e_init);
2014module_exit(amd8111e_cleanup);
2015