linux/drivers/net/ethernet/nvidia/forcedeth.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
   4 *
   5 * Note: This driver is a cleanroom reimplementation based on reverse
   6 *      engineered documentation written by Carl-Daniel Hailfinger
   7 *      and Andrew de Quincey.
   8 *
   9 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
  10 * trademarks of NVIDIA Corporation in the United States and other
  11 * countries.
  12 *
  13 * Copyright (C) 2003,4,5 Manfred Spraul
  14 * Copyright (C) 2004 Andrew de Quincey (wol support)
  15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  16 *              IRQ rate fixes, bigendian fixes, cleanups, verification)
  17 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
  18 *
  19 * Known bugs:
  20 * We suspect that on some hardware no TX done interrupts are generated.
  21 * This means recovery from netif_stop_queue only happens if the hw timer
  22 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
   23 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
  24 * If your hardware reliably generates tx done interrupts, then you can remove
  25 * DEV_NEED_TIMERIRQ from the driver_data flags.
  26 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  27 * superfluous timer interrupts from the nic.
  28 */
  29
  30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  31
  32#define FORCEDETH_VERSION               "0.64"
  33#define DRV_NAME                        "forcedeth"
  34
  35#include <linux/module.h>
  36#include <linux/types.h>
  37#include <linux/pci.h>
  38#include <linux/interrupt.h>
  39#include <linux/netdevice.h>
  40#include <linux/etherdevice.h>
  41#include <linux/delay.h>
  42#include <linux/sched.h>
  43#include <linux/spinlock.h>
  44#include <linux/ethtool.h>
  45#include <linux/timer.h>
  46#include <linux/skbuff.h>
  47#include <linux/mii.h>
  48#include <linux/random.h>
  49#include <linux/if_vlan.h>
  50#include <linux/dma-mapping.h>
  51#include <linux/slab.h>
  52#include <linux/uaccess.h>
  53#include <linux/prefetch.h>
  54#include <linux/u64_stats_sync.h>
  55#include <linux/io.h>
  56
  57#include <asm/irq.h>
  58
  59#define TX_WORK_PER_LOOP  64
  60#define RX_WORK_PER_LOOP  64
  61
  62/*
  63 * Hardware access:
  64 */
  65
  66#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
  67#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
  68#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
  69#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
  70#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
   71#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
  72#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
  73#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
  74#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
  75#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
  76#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
  77#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
  78#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
  79#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
  80#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
  81#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
  82#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
  83#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
  84#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
  85#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
  86#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
  87#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
   88#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
  89#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
  90#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
  91#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
  92#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
  93
  94enum {
  95        NvRegIrqStatus = 0x000,
  96#define NVREG_IRQSTAT_MIIEVENT  0x040
  97#define NVREG_IRQSTAT_MASK              0x83ff
  98        NvRegIrqMask = 0x004,
  99#define NVREG_IRQ_RX_ERROR              0x0001
 100#define NVREG_IRQ_RX                    0x0002
 101#define NVREG_IRQ_RX_NOBUF              0x0004
 102#define NVREG_IRQ_TX_ERR                0x0008
 103#define NVREG_IRQ_TX_OK                 0x0010
 104#define NVREG_IRQ_TIMER                 0x0020
 105#define NVREG_IRQ_LINK                  0x0040
 106#define NVREG_IRQ_RX_FORCED             0x0080
 107#define NVREG_IRQ_TX_FORCED             0x0100
 108#define NVREG_IRQ_RECOVER_ERROR         0x8200
 109#define NVREG_IRQMASK_THROUGHPUT        0x00df
 110#define NVREG_IRQMASK_CPU               0x0060
 111#define NVREG_IRQ_TX_ALL                (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
 112#define NVREG_IRQ_RX_ALL                (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
 113#define NVREG_IRQ_OTHER                 (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
 114
 115        NvRegUnknownSetupReg6 = 0x008,
 116#define NVREG_UNKSETUP6_VAL             3
 117
 118/*
 119 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 120 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 121 */
 122        NvRegPollingInterval = 0x00c,
 123#define NVREG_POLL_DEFAULT_THROUGHPUT   65535 /* backup tx cleanup if loop max reached */
 124#define NVREG_POLL_DEFAULT_CPU  13
 125        NvRegMSIMap0 = 0x020,
 126        NvRegMSIMap1 = 0x024,
 127        NvRegMSIIrqMask = 0x030,
 128#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 129        NvRegMisc1 = 0x080,
 130#define NVREG_MISC1_PAUSE_TX    0x01
 131#define NVREG_MISC1_HD          0x02
 132#define NVREG_MISC1_FORCE       0x3b0f3c
 133
 134        NvRegMacReset = 0x34,
 135#define NVREG_MAC_RESET_ASSERT  0x0F3
 136        NvRegTransmitterControl = 0x084,
 137#define NVREG_XMITCTL_START     0x01
 138#define NVREG_XMITCTL_MGMT_ST   0x40000000
 139#define NVREG_XMITCTL_SYNC_MASK         0x000f0000
 140#define NVREG_XMITCTL_SYNC_NOT_READY    0x0
 141#define NVREG_XMITCTL_SYNC_PHY_INIT     0x00040000
 142#define NVREG_XMITCTL_MGMT_SEMA_MASK    0x00000f00
 143#define NVREG_XMITCTL_MGMT_SEMA_FREE    0x0
 144#define NVREG_XMITCTL_HOST_SEMA_MASK    0x0000f000
 145#define NVREG_XMITCTL_HOST_SEMA_ACQ     0x0000f000
 146#define NVREG_XMITCTL_HOST_LOADED       0x00004000
 147#define NVREG_XMITCTL_TX_PATH_EN        0x01000000
 148#define NVREG_XMITCTL_DATA_START        0x00100000
 149#define NVREG_XMITCTL_DATA_READY        0x00010000
 150#define NVREG_XMITCTL_DATA_ERROR        0x00020000
 151        NvRegTransmitterStatus = 0x088,
 152#define NVREG_XMITSTAT_BUSY     0x01
 153
 154        NvRegPacketFilterFlags = 0x8c,
 155#define NVREG_PFF_PAUSE_RX      0x08
 156#define NVREG_PFF_ALWAYS        0x7F0000
 157#define NVREG_PFF_PROMISC       0x80
 158#define NVREG_PFF_MYADDR        0x20
 159#define NVREG_PFF_LOOPBACK      0x10
 160
 161        NvRegOffloadConfig = 0x90,
 162#define NVREG_OFFLOAD_HOMEPHY   0x601
 163#define NVREG_OFFLOAD_NORMAL    RX_NIC_BUFSIZE
 164        NvRegReceiverControl = 0x094,
 165#define NVREG_RCVCTL_START      0x01
 166#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
 167        NvRegReceiverStatus = 0x98,
 168#define NVREG_RCVSTAT_BUSY      0x01
 169
 170        NvRegSlotTime = 0x9c,
 171#define NVREG_SLOTTIME_LEGBF_ENABLED    0x80000000
 172#define NVREG_SLOTTIME_10_100_FULL      0x00007f00
 173#define NVREG_SLOTTIME_1000_FULL        0x0003ff00
 174#define NVREG_SLOTTIME_HALF             0x0000ff00
 175#define NVREG_SLOTTIME_DEFAULT          0x00007f00
 176#define NVREG_SLOTTIME_MASK             0x000000ff
 177
 178        NvRegTxDeferral = 0xA0,
 179#define NVREG_TX_DEFERRAL_DEFAULT               0x15050f
 180#define NVREG_TX_DEFERRAL_RGMII_10_100          0x16070f
 181#define NVREG_TX_DEFERRAL_RGMII_1000            0x14050f
 182#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10      0x16190f
 183#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100     0x16300f
 184#define NVREG_TX_DEFERRAL_MII_STRETCH           0x152000
 185        NvRegRxDeferral = 0xA4,
 186#define NVREG_RX_DEFERRAL_DEFAULT       0x16
 187        NvRegMacAddrA = 0xA8,
 188        NvRegMacAddrB = 0xAC,
 189        NvRegMulticastAddrA = 0xB0,
 190#define NVREG_MCASTADDRA_FORCE  0x01
 191        NvRegMulticastAddrB = 0xB4,
 192        NvRegMulticastMaskA = 0xB8,
 193#define NVREG_MCASTMASKA_NONE           0xffffffff
 194        NvRegMulticastMaskB = 0xBC,
 195#define NVREG_MCASTMASKB_NONE           0xffff
 196
 197        NvRegPhyInterface = 0xC0,
 198#define PHY_RGMII               0x10000000
 199        NvRegBackOffControl = 0xC4,
 200#define NVREG_BKOFFCTRL_DEFAULT                 0x70000000
 201#define NVREG_BKOFFCTRL_SEED_MASK               0x000003ff
 202#define NVREG_BKOFFCTRL_SELECT                  24
 203#define NVREG_BKOFFCTRL_GEAR                    12
 204
 205        NvRegTxRingPhysAddr = 0x100,
 206        NvRegRxRingPhysAddr = 0x104,
 207        NvRegRingSizes = 0x108,
 208#define NVREG_RINGSZ_TXSHIFT 0
 209#define NVREG_RINGSZ_RXSHIFT 16
 210        NvRegTransmitPoll = 0x10c,
 211#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
 212        NvRegLinkSpeed = 0x110,
 213#define NVREG_LINKSPEED_FORCE 0x10000
 214#define NVREG_LINKSPEED_10      1000
 215#define NVREG_LINKSPEED_100     100
 216#define NVREG_LINKSPEED_1000    50
 217#define NVREG_LINKSPEED_MASK    (0xFFF)
 218        NvRegUnknownSetupReg5 = 0x130,
 219#define NVREG_UNKSETUP5_BIT31   (1<<31)
 220        NvRegTxWatermark = 0x13c,
 221#define NVREG_TX_WM_DESC1_DEFAULT       0x0200010
 222#define NVREG_TX_WM_DESC2_3_DEFAULT     0x1e08000
 223#define NVREG_TX_WM_DESC2_3_1000        0xfe08000
 224        NvRegTxRxControl = 0x144,
 225#define NVREG_TXRXCTL_KICK      0x0001
 226#define NVREG_TXRXCTL_BIT1      0x0002
 227#define NVREG_TXRXCTL_BIT2      0x0004
 228#define NVREG_TXRXCTL_IDLE      0x0008
 229#define NVREG_TXRXCTL_RESET     0x0010
 230#define NVREG_TXRXCTL_RXCHECK   0x0400
 231#define NVREG_TXRXCTL_DESC_1    0
 232#define NVREG_TXRXCTL_DESC_2    0x002100
 233#define NVREG_TXRXCTL_DESC_3    0xc02200
 234#define NVREG_TXRXCTL_VLANSTRIP 0x00040
 235#define NVREG_TXRXCTL_VLANINS   0x00080
 236        NvRegTxRingPhysAddrHigh = 0x148,
 237        NvRegRxRingPhysAddrHigh = 0x14C,
 238        NvRegTxPauseFrame = 0x170,
 239#define NVREG_TX_PAUSEFRAME_DISABLE     0x0fff0080
 240#define NVREG_TX_PAUSEFRAME_ENABLE_V1   0x01800010
 241#define NVREG_TX_PAUSEFRAME_ENABLE_V2   0x056003f0
 242#define NVREG_TX_PAUSEFRAME_ENABLE_V3   0x09f00880
 243        NvRegTxPauseFrameLimit = 0x174,
 244#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
 245        NvRegMIIStatus = 0x180,
 246#define NVREG_MIISTAT_ERROR             0x0001
 247#define NVREG_MIISTAT_LINKCHANGE        0x0008
 248#define NVREG_MIISTAT_MASK_RW           0x0007
 249#define NVREG_MIISTAT_MASK_ALL          0x000f
 250        NvRegMIIMask = 0x184,
 251#define NVREG_MII_LINKCHANGE            0x0008
 252
 253        NvRegAdapterControl = 0x188,
 254#define NVREG_ADAPTCTL_START    0x02
 255#define NVREG_ADAPTCTL_LINKUP   0x04
 256#define NVREG_ADAPTCTL_PHYVALID 0x40000
 257#define NVREG_ADAPTCTL_RUNNING  0x100000
 258#define NVREG_ADAPTCTL_PHYSHIFT 24
 259        NvRegMIISpeed = 0x18c,
 260#define NVREG_MIISPEED_BIT8     (1<<8)
 261#define NVREG_MIIDELAY  5
 262        NvRegMIIControl = 0x190,
 263#define NVREG_MIICTL_INUSE      0x08000
 264#define NVREG_MIICTL_WRITE      0x00400
 265#define NVREG_MIICTL_ADDRSHIFT  5
 266        NvRegMIIData = 0x194,
 267        NvRegTxUnicast = 0x1a0,
 268        NvRegTxMulticast = 0x1a4,
 269        NvRegTxBroadcast = 0x1a8,
 270        NvRegWakeUpFlags = 0x200,
 271#define NVREG_WAKEUPFLAGS_VAL           0x7770
 272#define NVREG_WAKEUPFLAGS_BUSYSHIFT     24
 273#define NVREG_WAKEUPFLAGS_ENABLESHIFT   16
 274#define NVREG_WAKEUPFLAGS_D3SHIFT       12
 275#define NVREG_WAKEUPFLAGS_D2SHIFT       8
 276#define NVREG_WAKEUPFLAGS_D1SHIFT       4
 277#define NVREG_WAKEUPFLAGS_D0SHIFT       0
 278#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT         0x01
 279#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT      0x02
 280#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE     0x04
 281#define NVREG_WAKEUPFLAGS_ENABLE        0x1111
 282
 283        NvRegMgmtUnitGetVersion = 0x204,
 284#define NVREG_MGMTUNITGETVERSION        0x01
 285        NvRegMgmtUnitVersion = 0x208,
 286#define NVREG_MGMTUNITVERSION           0x08
 287        NvRegPowerCap = 0x268,
 288#define NVREG_POWERCAP_D3SUPP   (1<<30)
 289#define NVREG_POWERCAP_D2SUPP   (1<<26)
 290#define NVREG_POWERCAP_D1SUPP   (1<<25)
 291        NvRegPowerState = 0x26c,
 292#define NVREG_POWERSTATE_POWEREDUP      0x8000
 293#define NVREG_POWERSTATE_VALID          0x0100
 294#define NVREG_POWERSTATE_MASK           0x0003
 295#define NVREG_POWERSTATE_D0             0x0000
 296#define NVREG_POWERSTATE_D1             0x0001
 297#define NVREG_POWERSTATE_D2             0x0002
 298#define NVREG_POWERSTATE_D3             0x0003
 299        NvRegMgmtUnitControl = 0x278,
 300#define NVREG_MGMTUNITCONTROL_INUSE     0x20000
 301        NvRegTxCnt = 0x280,
 302        NvRegTxZeroReXmt = 0x284,
 303        NvRegTxOneReXmt = 0x288,
 304        NvRegTxManyReXmt = 0x28c,
 305        NvRegTxLateCol = 0x290,
 306        NvRegTxUnderflow = 0x294,
 307        NvRegTxLossCarrier = 0x298,
 308        NvRegTxExcessDef = 0x29c,
 309        NvRegTxRetryErr = 0x2a0,
 310        NvRegRxFrameErr = 0x2a4,
 311        NvRegRxExtraByte = 0x2a8,
 312        NvRegRxLateCol = 0x2ac,
 313        NvRegRxRunt = 0x2b0,
 314        NvRegRxFrameTooLong = 0x2b4,
 315        NvRegRxOverflow = 0x2b8,
 316        NvRegRxFCSErr = 0x2bc,
 317        NvRegRxFrameAlignErr = 0x2c0,
 318        NvRegRxLenErr = 0x2c4,
 319        NvRegRxUnicast = 0x2c8,
 320        NvRegRxMulticast = 0x2cc,
 321        NvRegRxBroadcast = 0x2d0,
 322        NvRegTxDef = 0x2d4,
 323        NvRegTxFrame = 0x2d8,
 324        NvRegRxCnt = 0x2dc,
 325        NvRegTxPause = 0x2e0,
 326        NvRegRxPause = 0x2e4,
 327        NvRegRxDropFrame = 0x2e8,
 328        NvRegVlanControl = 0x300,
 329#define NVREG_VLANCONTROL_ENABLE        0x2000
 330        NvRegMSIXMap0 = 0x3e0,
 331        NvRegMSIXMap1 = 0x3e4,
 332        NvRegMSIXIrqStatus = 0x3f0,
 333
 334        NvRegPowerState2 = 0x600,
 335#define NVREG_POWERSTATE2_POWERUP_MASK          0x0F15
 336#define NVREG_POWERSTATE2_POWERUP_REV_A3        0x0001
 337#define NVREG_POWERSTATE2_PHY_RESET             0x0004
 338#define NVREG_POWERSTATE2_GATE_CLOCKS           0x0F00
 339};
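/*
 * Access pattern sketch (illustration only): the offsets above are all
 * used relative to the mapped register window, e.g.
 *
 *	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
 *	pci_push(base);	/* flush the posted write, see pci_push() below */
 */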
 340
 341/* Big endian: should work, but is untested */
 342struct ring_desc {
 343        __le32 buf;
 344        __le32 flaglen;
 345};
 346
 347struct ring_desc_ex {
 348        __le32 bufhigh;
 349        __le32 buflow;
 350        __le32 txvlan;
 351        __le32 flaglen;
 352};
 353
 354union ring_type {
 355        struct ring_desc *orig;
 356        struct ring_desc_ex *ex;
 357};
 358
 359#define FLAG_MASK_V1 0xffff0000
 360#define FLAG_MASK_V2 0xffffc000
 361#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
 362#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
 363
 364#define NV_TX_LASTPACKET        (1<<16)
 365#define NV_TX_RETRYERROR        (1<<19)
 366#define NV_TX_RETRYCOUNT_MASK   (0xF<<20)
 367#define NV_TX_FORCED_INTERRUPT  (1<<24)
 368#define NV_TX_DEFERRED          (1<<26)
 369#define NV_TX_CARRIERLOST       (1<<27)
 370#define NV_TX_LATECOLLISION     (1<<28)
 371#define NV_TX_UNDERFLOW         (1<<29)
 372#define NV_TX_ERROR             (1<<30)
 373#define NV_TX_VALID             (1<<31)
 374
 375#define NV_TX2_LASTPACKET       (1<<29)
 376#define NV_TX2_RETRYERROR       (1<<18)
 377#define NV_TX2_RETRYCOUNT_MASK  (0xF<<19)
 378#define NV_TX2_FORCED_INTERRUPT (1<<30)
 379#define NV_TX2_DEFERRED         (1<<25)
 380#define NV_TX2_CARRIERLOST      (1<<26)
 381#define NV_TX2_LATECOLLISION    (1<<27)
 382#define NV_TX2_UNDERFLOW        (1<<28)
 383/* error and valid are the same for both */
 384#define NV_TX2_ERROR            (1<<30)
 385#define NV_TX2_VALID            (1<<31)
 386#define NV_TX2_TSO              (1<<28)
 387#define NV_TX2_TSO_SHIFT        14
 388#define NV_TX2_TSO_MAX_SHIFT    14
 389#define NV_TX2_TSO_MAX_SIZE     (1<<NV_TX2_TSO_MAX_SHIFT)
 390#define NV_TX2_CHECKSUM_L3      (1<<27)
 391#define NV_TX2_CHECKSUM_L4      (1<<26)
 392
 393#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
 394
 395#define NV_RX_DESCRIPTORVALID   (1<<16)
 396#define NV_RX_MISSEDFRAME       (1<<17)
 397#define NV_RX_SUBTRACT1         (1<<18)
 398#define NV_RX_ERROR1            (1<<23)
 399#define NV_RX_ERROR2            (1<<24)
 400#define NV_RX_ERROR3            (1<<25)
 401#define NV_RX_ERROR4            (1<<26)
 402#define NV_RX_CRCERR            (1<<27)
 403#define NV_RX_OVERFLOW          (1<<28)
 404#define NV_RX_FRAMINGERR        (1<<29)
 405#define NV_RX_ERROR             (1<<30)
 406#define NV_RX_AVAIL             (1<<31)
 407#define NV_RX_ERROR_MASK        (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
 408
 409#define NV_RX2_CHECKSUMMASK     (0x1C000000)
 410#define NV_RX2_CHECKSUM_IP      (0x10000000)
 411#define NV_RX2_CHECKSUM_IP_TCP  (0x14000000)
 412#define NV_RX2_CHECKSUM_IP_UDP  (0x18000000)
 413#define NV_RX2_DESCRIPTORVALID  (1<<29)
 414#define NV_RX2_SUBTRACT1        (1<<25)
 415#define NV_RX2_ERROR1           (1<<18)
 416#define NV_RX2_ERROR2           (1<<19)
 417#define NV_RX2_ERROR3           (1<<20)
 418#define NV_RX2_ERROR4           (1<<21)
 419#define NV_RX2_CRCERR           (1<<22)
 420#define NV_RX2_OVERFLOW         (1<<23)
 421#define NV_RX2_FRAMINGERR       (1<<24)
 422/* error and avail are the same for both */
 423#define NV_RX2_ERROR            (1<<30)
 424#define NV_RX2_AVAIL            (1<<31)
 425#define NV_RX2_ERROR_MASK       (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
 426
 427#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
 428#define NV_RX3_VLAN_TAG_MASK    (0x0000FFFF)
 429
 430/* Miscellaneous hardware related defines: */
 431#define NV_PCI_REGSZ_VER1       0x270
 432#define NV_PCI_REGSZ_VER2       0x2d4
 433#define NV_PCI_REGSZ_VER3       0x604
 434#define NV_PCI_REGSZ_MAX        0x604
 435
 436/* various timeout delays: all in usec */
 437#define NV_TXRX_RESET_DELAY     4
 438#define NV_TXSTOP_DELAY1        10
 439#define NV_TXSTOP_DELAY1MAX     500000
 440#define NV_TXSTOP_DELAY2        100
 441#define NV_RXSTOP_DELAY1        10
 442#define NV_RXSTOP_DELAY1MAX     500000
 443#define NV_RXSTOP_DELAY2        100
 444#define NV_SETUP5_DELAY         5
 445#define NV_SETUP5_DELAYMAX      50000
 446#define NV_POWERUP_DELAY        5
 447#define NV_POWERUP_DELAYMAX     5000
 448#define NV_MIIBUSY_DELAY        50
 449#define NV_MIIPHY_DELAY 10
 450#define NV_MIIPHY_DELAYMAX      10000
 451#define NV_MAC_RESET_DELAY      64
 452
 453#define NV_WAKEUPPATTERNS       5
 454#define NV_WAKEUPMASKENTRIES    4
 455
 456/* General driver defaults */
 457#define NV_WATCHDOG_TIMEO       (5*HZ)
 458
 459#define RX_RING_DEFAULT         512
 460#define TX_RING_DEFAULT         256
 461#define RX_RING_MIN             128
 462#define TX_RING_MIN             64
 463#define RING_MAX_DESC_VER_1     1024
 464#define RING_MAX_DESC_VER_2_3   16384
 465
  466/* rx/tx mac addr + type + vlan + align + slack */
 467#define NV_RX_HEADERS           (64)
 468/* even more slack. */
 469#define NV_RX_ALLOC_PAD         (64)
 470
 471/* maximum mtu size */
 472#define NV_PKTLIMIT_1   ETH_DATA_LEN    /* hard limit not known */
 473#define NV_PKTLIMIT_2   9100    /* Actual limit according to NVidia: 9202 */
 474
 475#define OOM_REFILL      (1+HZ/20)
 476#define POLL_WAIT       (1+HZ/100)
 477#define LINK_TIMEOUT    (3*HZ)
 478#define STATS_INTERVAL  (10*HZ)
 479
 480/*
 481 * desc_ver values:
 482 * The nic supports three different descriptor types:
 483 * - DESC_VER_1: Original
 484 * - DESC_VER_2: support for jumbo frames.
 485 * - DESC_VER_3: 64-bit format.
 486 */
 487#define DESC_VER_1      1
 488#define DESC_VER_2      2
 489#define DESC_VER_3      3
 490
 491/* PHY defines */
 492#define PHY_OUI_MARVELL         0x5043
 493#define PHY_OUI_CICADA          0x03f1
 494#define PHY_OUI_VITESSE         0x01c1
 495#define PHY_OUI_REALTEK         0x0732
 496#define PHY_OUI_REALTEK2        0x0020
 497#define PHYID1_OUI_MASK 0x03ff
 498#define PHYID1_OUI_SHFT 6
 499#define PHYID2_OUI_MASK 0xfc00
 500#define PHYID2_OUI_SHFT 10
 501#define PHYID2_MODEL_MASK               0x03f0
 502#define PHY_MODEL_REALTEK_8211          0x0110
 503#define PHY_REV_MASK                    0x0001
 504#define PHY_REV_REALTEK_8211B           0x0000
 505#define PHY_REV_REALTEK_8211C           0x0001
 506#define PHY_MODEL_REALTEK_8201          0x0200
 507#define PHY_MODEL_MARVELL_E3016         0x0220
 508#define PHY_MARVELL_E3016_INITMASK      0x0300
 509#define PHY_CICADA_INIT1        0x0f000
 510#define PHY_CICADA_INIT2        0x0e00
 511#define PHY_CICADA_INIT3        0x01000
 512#define PHY_CICADA_INIT4        0x0200
 513#define PHY_CICADA_INIT5        0x0004
 514#define PHY_CICADA_INIT6        0x02000
 515#define PHY_VITESSE_INIT_REG1   0x1f
 516#define PHY_VITESSE_INIT_REG2   0x10
 517#define PHY_VITESSE_INIT_REG3   0x11
 518#define PHY_VITESSE_INIT_REG4   0x12
 519#define PHY_VITESSE_INIT_MSK1   0xc
 520#define PHY_VITESSE_INIT_MSK2   0x0180
 521#define PHY_VITESSE_INIT1       0x52b5
 522#define PHY_VITESSE_INIT2       0xaf8a
 523#define PHY_VITESSE_INIT3       0x8
 524#define PHY_VITESSE_INIT4       0x8f8a
 525#define PHY_VITESSE_INIT5       0xaf86
 526#define PHY_VITESSE_INIT6       0x8f86
 527#define PHY_VITESSE_INIT7       0xaf82
 528#define PHY_VITESSE_INIT8       0x0100
 529#define PHY_VITESSE_INIT9       0x8f82
 530#define PHY_VITESSE_INIT10      0x0
 531#define PHY_REALTEK_INIT_REG1   0x1f
 532#define PHY_REALTEK_INIT_REG2   0x19
 533#define PHY_REALTEK_INIT_REG3   0x13
 534#define PHY_REALTEK_INIT_REG4   0x14
 535#define PHY_REALTEK_INIT_REG5   0x18
 536#define PHY_REALTEK_INIT_REG6   0x11
 537#define PHY_REALTEK_INIT_REG7   0x01
 538#define PHY_REALTEK_INIT1       0x0000
 539#define PHY_REALTEK_INIT2       0x8e00
 540#define PHY_REALTEK_INIT3       0x0001
 541#define PHY_REALTEK_INIT4       0xad17
 542#define PHY_REALTEK_INIT5       0xfb54
 543#define PHY_REALTEK_INIT6       0xf5c7
 544#define PHY_REALTEK_INIT7       0x1000
 545#define PHY_REALTEK_INIT8       0x0003
 546#define PHY_REALTEK_INIT9       0x0008
 547#define PHY_REALTEK_INIT10      0x0005
 548#define PHY_REALTEK_INIT11      0x0200
 549#define PHY_REALTEK_INIT_MSK1   0x0003
 550
 551#define PHY_GIGABIT     0x0100
 552
 553#define PHY_TIMEOUT     0x1
 554#define PHY_ERROR       0x2
 555
 556#define PHY_100 0x1
 557#define PHY_1000        0x2
 558#define PHY_HALF        0x100
 559
 560#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
 561#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
 562#define NV_PAUSEFRAME_RX_ENABLE  0x0004
 563#define NV_PAUSEFRAME_TX_ENABLE  0x0008
 564#define NV_PAUSEFRAME_RX_REQ     0x0010
 565#define NV_PAUSEFRAME_TX_REQ     0x0020
 566#define NV_PAUSEFRAME_AUTONEG    0x0040
 567
 568/* MSI/MSI-X defines */
 569#define NV_MSI_X_MAX_VECTORS  8
 570#define NV_MSI_X_VECTORS_MASK 0x000f
 571#define NV_MSI_CAPABLE        0x0010
 572#define NV_MSI_X_CAPABLE      0x0020
 573#define NV_MSI_ENABLED        0x0040
 574#define NV_MSI_X_ENABLED      0x0080
 575
 576#define NV_MSI_X_VECTOR_ALL   0x0
 577#define NV_MSI_X_VECTOR_RX    0x0
 578#define NV_MSI_X_VECTOR_TX    0x1
 579#define NV_MSI_X_VECTOR_OTHER 0x2
 580
 581#define NV_MSI_PRIV_OFFSET 0x68
 582#define NV_MSI_PRIV_VALUE  0xffffffff
 583
 584#define NV_RESTART_TX         0x1
 585#define NV_RESTART_RX         0x2
 586
 587#define NV_TX_LIMIT_COUNT     16
 588
 589#define NV_DYNAMIC_THRESHOLD        4
 590#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
 591
 592/* statistics */
 593struct nv_ethtool_str {
 594        char name[ETH_GSTRING_LEN];
 595};
 596
 597static const struct nv_ethtool_str nv_estats_str[] = {
 598        { "tx_bytes" }, /* includes Ethernet FCS CRC */
 599        { "tx_zero_rexmt" },
 600        { "tx_one_rexmt" },
 601        { "tx_many_rexmt" },
 602        { "tx_late_collision" },
 603        { "tx_fifo_errors" },
 604        { "tx_carrier_errors" },
 605        { "tx_excess_deferral" },
 606        { "tx_retry_error" },
 607        { "rx_frame_error" },
 608        { "rx_extra_byte" },
 609        { "rx_late_collision" },
 610        { "rx_runt" },
 611        { "rx_frame_too_long" },
 612        { "rx_over_errors" },
 613        { "rx_crc_errors" },
 614        { "rx_frame_align_error" },
 615        { "rx_length_error" },
 616        { "rx_unicast" },
 617        { "rx_multicast" },
 618        { "rx_broadcast" },
 619        { "rx_packets" },
 620        { "rx_errors_total" },
 621        { "tx_errors_total" },
 622
 623        /* version 2 stats */
 624        { "tx_deferral" },
 625        { "tx_packets" },
 626        { "rx_bytes" }, /* includes Ethernet FCS CRC */
 627        { "tx_pause" },
 628        { "rx_pause" },
 629        { "rx_drop_frame" },
 630
 631        /* version 3 stats */
 632        { "tx_unicast" },
 633        { "tx_multicast" },
 634        { "tx_broadcast" }
 635};
 636
 637struct nv_ethtool_stats {
 638        u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
 639        u64 tx_zero_rexmt;
 640        u64 tx_one_rexmt;
 641        u64 tx_many_rexmt;
 642        u64 tx_late_collision;
 643        u64 tx_fifo_errors;
 644        u64 tx_carrier_errors;
 645        u64 tx_excess_deferral;
 646        u64 tx_retry_error;
 647        u64 rx_frame_error;
 648        u64 rx_extra_byte;
 649        u64 rx_late_collision;
 650        u64 rx_runt;
 651        u64 rx_frame_too_long;
 652        u64 rx_over_errors;
 653        u64 rx_crc_errors;
 654        u64 rx_frame_align_error;
 655        u64 rx_length_error;
 656        u64 rx_unicast;
 657        u64 rx_multicast;
 658        u64 rx_broadcast;
 659        u64 rx_packets; /* should be ifconfig->rx_packets */
 660        u64 rx_errors_total;
 661        u64 tx_errors_total;
 662
 663        /* version 2 stats */
 664        u64 tx_deferral;
 665        u64 tx_packets; /* should be ifconfig->tx_packets */
 666        u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
 667        u64 tx_pause;
 668        u64 rx_pause;
 669        u64 rx_drop_frame;
 670
 671        /* version 3 stats */
 672        u64 tx_unicast;
 673        u64 tx_multicast;
 674        u64 tx_broadcast;
 675};
 676
 677#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
 678#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
 679#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
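/*
 * Every member of struct nv_ethtool_stats is a u64, so the sizeof
 * arithmetic simply counts fields: 33 counters for v3 hardware, 30 for
 * v2 and 24 for v1.
 */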
 680
 681/* diagnostics */
 682#define NV_TEST_COUNT_BASE 3
 683#define NV_TEST_COUNT_EXTENDED 4
 684
 685static const struct nv_ethtool_str nv_etests_str[] = {
 686        { "link      (online/offline)" },
 687        { "register  (offline)       " },
 688        { "interrupt (offline)       " },
 689        { "loopback  (offline)       " }
 690};
 691
 692struct register_test {
 693        __u32 reg;
 694        __u32 mask;
 695};
 696
 697static const struct register_test nv_registers_test[] = {
 698        { NvRegUnknownSetupReg6, 0x01 },
 699        { NvRegMisc1, 0x03c },
 700        { NvRegOffloadConfig, 0x03ff },
 701        { NvRegMulticastAddrA, 0xffffffff },
 702        { NvRegTxWatermark, 0x0ff },
 703        { NvRegWakeUpFlags, 0x07777 },
 704        { 0, 0 }
 705};
 706
 707struct nv_skb_map {
 708        struct sk_buff *skb;
 709        dma_addr_t dma;
 710        unsigned int dma_len:31;
 711        unsigned int dma_single:1;
 712        struct ring_desc_ex *first_tx_desc;
 713        struct nv_skb_map *next_tx_ctx;
 714};
 715
 716struct nv_txrx_stats {
 717        u64 stat_rx_packets;
 718        u64 stat_rx_bytes; /* not always available in HW */
 719        u64 stat_rx_missed_errors;
 720        u64 stat_rx_dropped;
 721        u64 stat_tx_packets; /* not always available in HW */
 722        u64 stat_tx_bytes;
 723        u64 stat_tx_dropped;
 724};
 725
 726#define nv_txrx_stats_inc(member) \
 727                __this_cpu_inc(np->txrx_stats->member)
 728#define nv_txrx_stats_add(member, count) \
 729                __this_cpu_add(np->txrx_stats->member, (count))
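/*
 * Writer-side usage sketch (illustration only, with np in scope as
 * elsewhere in this file): the single per-direction update thread bumps
 * the per-cpu counters inside the matching u64_stats_sync section, e.g.
 *
 *	u64_stats_update_begin(&np->swstats_rx_syncp);
 *	nv_txrx_stats_inc(stat_rx_packets);
 *	nv_txrx_stats_add(stat_rx_bytes, len);
 *	u64_stats_update_end(&np->swstats_rx_syncp);
 */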
 730
 731/*
 732 * SMP locking:
 733 * All hardware access under netdev_priv(dev)->lock, except the performance
 734 * critical parts:
 735 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 736 *      by the arch code for interrupts.
 737 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 738 *      needs netdev_priv(dev)->lock :-(
 739 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 740 *
 741 * Hardware stats updates are protected by hwstats_lock:
 742 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 743 *   integer wraparound in the NIC stats registers, at low frequency
 744 *   (0.1 Hz)
 745 * - updated by nv_get_ethtool_stats + nv_get_stats64
 746 *
 747 * Software stats are accessed only through 64b synchronization points
 748 * and are not subject to other synchronization techniques (single
 749 * update thread on the TX or RX paths).
 750 */
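/*
 * Reader-side sketch of those 64b synchronization points (illustration
 * only; "cpu_stats" stands for one per-cpu slot of np->txrx_stats):
 *
 *	unsigned int start;
 *	u64 rx_packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
 *		rx_packets = cpu_stats->stat_rx_packets;
 *	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, start));
 */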
 751
 752/* in dev: base, irq */
 753struct fe_priv {
 754        spinlock_t lock;
 755
 756        struct net_device *dev;
 757        struct napi_struct napi;
 758
 759        /* hardware stats are updated in syscall and timer */
 760        spinlock_t hwstats_lock;
 761        struct nv_ethtool_stats estats;
 762
 763        int in_shutdown;
 764        u32 linkspeed;
 765        int duplex;
 766        int autoneg;
 767        int fixed_mode;
 768        int phyaddr;
 769        int wolenabled;
 770        unsigned int phy_oui;
 771        unsigned int phy_model;
 772        unsigned int phy_rev;
 773        u16 gigabit;
 774        int intr_test;
 775        int recover_error;
 776        int quiet_count;
 777
 778        /* General data: RO fields */
 779        dma_addr_t ring_addr;
 780        struct pci_dev *pci_dev;
 781        u32 orig_mac[2];
 782        u32 events;
 783        u32 irqmask;
 784        u32 desc_ver;
 785        u32 txrxctl_bits;
 786        u32 vlanctl_bits;
 787        u32 driver_data;
 788        u32 device_id;
 789        u32 register_size;
 790        u32 mac_in_use;
 791        int mgmt_version;
 792        int mgmt_sema;
 793
 794        void __iomem *base;
 795
 796        /* rx specific fields.
  797         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 798         */
 799        union ring_type get_rx, put_rx, last_rx;
 800        struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
 801        struct nv_skb_map *last_rx_ctx;
 802        struct nv_skb_map *rx_skb;
 803
 804        union ring_type rx_ring;
 805        unsigned int rx_buf_sz;
 806        unsigned int pkt_limit;
 807        struct timer_list oom_kick;
 808        struct timer_list nic_poll;
 809        struct timer_list stats_poll;
 810        u32 nic_poll_irq;
 811        int rx_ring_size;
 812
 813        /* RX software stats */
 814        struct u64_stats_sync swstats_rx_syncp;
 815        struct nv_txrx_stats __percpu *txrx_stats;
 816
 817        /* media detection workaround.
  818         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 819         */
 820        int need_linktimer;
 821        unsigned long link_timeout;
 822        /*
 823         * tx specific fields.
 824         */
 825        union ring_type get_tx, put_tx, last_tx;
 826        struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
 827        struct nv_skb_map *last_tx_ctx;
 828        struct nv_skb_map *tx_skb;
 829
 830        union ring_type tx_ring;
 831        u32 tx_flags;
 832        int tx_ring_size;
 833        int tx_limit;
 834        u32 tx_pkts_in_progress;
 835        struct nv_skb_map *tx_change_owner;
 836        struct nv_skb_map *tx_end_flip;
 837        int tx_stop;
 838
 839        /* TX software stats */
 840        struct u64_stats_sync swstats_tx_syncp;
 841
 842        /* msi/msi-x fields */
 843        u32 msi_flags;
 844        struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 845
 846        /* flow control */
 847        u32 pause_flags;
 848
 849        /* power saved state */
 850        u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
 851
 852        /* for different msi-x irq type */
 853        char name_rx[IFNAMSIZ + 3];       /* -rx    */
 854        char name_tx[IFNAMSIZ + 3];       /* -tx    */
 855        char name_other[IFNAMSIZ + 6];    /* -other */
 856};
 857
 858/*
 859 * Maximum number of loops until we assume that a bit in the irq mask
 860 * is stuck. Overridable with module param.
 861 */
 862static int max_interrupt_work = 4;
 863
 864/*
  865 * Optimization can be either throughput mode or cpu mode
 866 *
 867 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 868 * CPU Mode: Interrupts are controlled by a timer.
 869 */
 870enum {
 871        NV_OPTIMIZATION_MODE_THROUGHPUT,
 872        NV_OPTIMIZATION_MODE_CPU,
 873        NV_OPTIMIZATION_MODE_DYNAMIC
 874};
 875static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
 876
 877/*
 878 * Poll interval for timer irq
 879 *
  880 * This interval determines how frequently an interrupt is generated.
  881 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
  882 * Min = 0 and Max = 65535
 883 */
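/*
 * Worked example: a 1 ms interval works out as
 * (1000 * 100) / 2^10 = 97, the NVREG_POLL_DEFAULT=97 value quoted at
 * NvRegPollingInterval above.
 */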
 884static int poll_interval = -1;
 885
 886/*
 887 * MSI interrupts
 888 */
 889enum {
 890        NV_MSI_INT_DISABLED,
 891        NV_MSI_INT_ENABLED
 892};
 893static int msi = NV_MSI_INT_ENABLED;
 894
 895/*
 896 * MSIX interrupts
 897 */
 898enum {
 899        NV_MSIX_INT_DISABLED,
 900        NV_MSIX_INT_ENABLED
 901};
 902static int msix = NV_MSIX_INT_ENABLED;
 903
 904/*
 905 * DMA 64bit
 906 */
 907enum {
 908        NV_DMA_64BIT_DISABLED,
 909        NV_DMA_64BIT_ENABLED
 910};
 911static int dma_64bit = NV_DMA_64BIT_ENABLED;
 912
 913/*
 914 * Debug output control for tx_timeout
 915 */
 916static bool debug_tx_timeout = false;
 917
 918/*
 919 * Crossover Detection
 920 * Realtek 8201 phy + some OEM boards do not work properly.
 921 */
 922enum {
 923        NV_CROSSOVER_DETECTION_DISABLED,
 924        NV_CROSSOVER_DETECTION_ENABLED
 925};
 926static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
 927
 928/*
 929 * Power down phy when interface is down (persists through reboot;
 930 * older Linux and other OSes may not power it up again)
 931 */
 932static int phy_power_down;
 933
 934static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 935{
 936        return netdev_priv(dev);
 937}
 938
 939static inline u8 __iomem *get_hwbase(struct net_device *dev)
 940{
 941        return ((struct fe_priv *)netdev_priv(dev))->base;
 942}
 943
 944static inline void pci_push(u8 __iomem *base)
 945{
 946        /* force out pending posted writes */
 947        readl(base);
 948}
 949
 950static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 951{
 952        return le32_to_cpu(prd->flaglen)
 953                & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 954}
 955
 956static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 957{
 958        return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 959}
 960
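/*
 * The "optimized" code paths are those using the extended (DESC_VER_3)
 * descriptors; DESC_VER_1/2 use the original struct ring_desc layout.
 */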
 961static bool nv_optimized(struct fe_priv *np)
 962{
 963        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 964                return false;
 965        return true;
 966}
 967
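/*
 * Busy-wait until (register & mask) == target; delay and delaymax are
 * in usec. Returns 0 on success, 1 if delaymax elapses first.
 */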
 968static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 969                     int delay, int delaymax)
 970{
 971        u8 __iomem *base = get_hwbase(dev);
 972
 973        pci_push(base);
 974        do {
 975                udelay(delay);
 976                delaymax -= delay;
 977                if (delaymax < 0)
 978                        return 1;
 979        } while ((readl(base + offset) & mask) != target);
 980        return 0;
 981}
 982
 983#define NV_SETUP_RX_RING 0x01
 984#define NV_SETUP_TX_RING 0x02
 985
 986static inline u32 dma_low(dma_addr_t addr)
 987{
 988        return addr;
 989}
 990
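/*
 * The double shift below avoids undefined behaviour when dma_addr_t is
 * only 32 bits wide, where a plain ">> 32" would shift by the full
 * width of the type.
 */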
 991static inline u32 dma_high(dma_addr_t addr)
 992{
 993        return addr>>31>>1;     /* 0 if 32bit, shift down by 32 if 64bit */
 994}
 995
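/*
 * The rx and tx rings share one DMA-coherent allocation (see
 * free_rings() below): rx descriptors come first, and the tx ring
 * starts rx_ring_size descriptors into the block.
 */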
 996static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 997{
 998        struct fe_priv *np = get_nvpriv(dev);
 999        u8 __iomem *base = get_hwbase(dev);
1000
1001        if (!nv_optimized(np)) {
1002                if (rxtx_flags & NV_SETUP_RX_RING)
1003                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1004                if (rxtx_flags & NV_SETUP_TX_RING)
1005                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1006        } else {
1007                if (rxtx_flags & NV_SETUP_RX_RING) {
1008                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1009                        writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
1010                }
1011                if (rxtx_flags & NV_SETUP_TX_RING) {
1012                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1013                        writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
1014                }
1015        }
1016}
1017
1018static void free_rings(struct net_device *dev)
1019{
1020        struct fe_priv *np = get_nvpriv(dev);
1021
1022        if (!nv_optimized(np)) {
1023                if (np->rx_ring.orig)
1024                        dma_free_coherent(&np->pci_dev->dev,
1025                                          sizeof(struct ring_desc) *
1026                                          (np->rx_ring_size +
1027                                          np->tx_ring_size),
1028                                          np->rx_ring.orig, np->ring_addr);
1029        } else {
1030                if (np->rx_ring.ex)
1031                        dma_free_coherent(&np->pci_dev->dev,
1032                                          sizeof(struct ring_desc_ex) *
1033                                          (np->rx_ring_size +
1034                                          np->tx_ring_size),
1035                                          np->rx_ring.ex, np->ring_addr);
1036        }
1037        kfree(np->rx_skb);
1038        kfree(np->tx_skb);
1039}
1040
1041static int using_multi_irqs(struct net_device *dev)
1042{
1043        struct fe_priv *np = get_nvpriv(dev);
1044
1045        if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1046            ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
1047                return 0;
1048        else
1049                return 1;
1050}
1051
1052static void nv_txrx_gate(struct net_device *dev, bool gate)
1053{
1054        struct fe_priv *np = get_nvpriv(dev);
1055        u8 __iomem *base = get_hwbase(dev);
1056        u32 powerstate;
1057
1058        if (!np->mac_in_use &&
1059            (np->driver_data & DEV_HAS_POWER_CNTRL)) {
1060                powerstate = readl(base + NvRegPowerState2);
1061                if (gate)
1062                        powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
1063                else
1064                        powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
1065                writel(powerstate, base + NvRegPowerState2);
1066        }
1067}
1068
1069static void nv_enable_irq(struct net_device *dev)
1070{
1071        struct fe_priv *np = get_nvpriv(dev);
1072
1073        if (!using_multi_irqs(dev)) {
1074                if (np->msi_flags & NV_MSI_X_ENABLED)
1075                        enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1076                else
1077                        enable_irq(np->pci_dev->irq);
1078        } else {
1079                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1080                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1081                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1082        }
1083}
1084
1085static void nv_disable_irq(struct net_device *dev)
1086{
1087        struct fe_priv *np = get_nvpriv(dev);
1088
1089        if (!using_multi_irqs(dev)) {
1090                if (np->msi_flags & NV_MSI_X_ENABLED)
1091                        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1092                else
1093                        disable_irq(np->pci_dev->irq);
1094        } else {
1095                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1096                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1097                disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1098        }
1099}
1100
1101/* In MSIX mode, a write to irqmask behaves as XOR */
1102static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1103{
1104        u8 __iomem *base = get_hwbase(dev);
1105
1106        writel(mask, base + NvRegIrqMask);
1107}
1108
1109static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1110{
1111        struct fe_priv *np = get_nvpriv(dev);
1112        u8 __iomem *base = get_hwbase(dev);
1113
1114        if (np->msi_flags & NV_MSI_X_ENABLED) {
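                /* MSI-X irqmask writes have the XOR behaviour noted above
                 * nv_enable_hw_interrupts(): writing the currently enabled
                 * bits toggles them off.
                 */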
1115                writel(mask, base + NvRegIrqMask);
1116        } else {
1117                if (np->msi_flags & NV_MSI_ENABLED)
1118                        writel(0, base + NvRegMSIIrqMask);
1119                writel(0, base + NvRegIrqMask);
1120        }
1121}
1122
1123static void nv_napi_enable(struct net_device *dev)
1124{
1125        struct fe_priv *np = get_nvpriv(dev);
1126
1127        napi_enable(&np->napi);
1128}
1129
1130static void nv_napi_disable(struct net_device *dev)
1131{
1132        struct fe_priv *np = get_nvpriv(dev);
1133
1134        napi_disable(&np->napi);
1135}
1136
1137#define MII_READ        (-1)
1138/* mii_rw: read/write a register on the PHY.
1139 *
1140 * Caller must guarantee serialization
1141 */
1142static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1143{
1144        u8 __iomem *base = get_hwbase(dev);
1145        u32 reg;
1146        int retval;
1147
1148        writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1149
1150        reg = readl(base + NvRegMIIControl);
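        /* An earlier MII transaction is still flagged in-use; writing the
         * INUSE bit back appears to cancel it (write-to-clear), after which
         * the bus gets NV_MIIBUSY_DELAY usec to settle.
         */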
1151        if (reg & NVREG_MIICTL_INUSE) {
1152                writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1153                udelay(NV_MIIBUSY_DELAY);
1154        }
1155
1156        reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1157        if (value != MII_READ) {
1158                writel(value, base + NvRegMIIData);
1159                reg |= NVREG_MIICTL_WRITE;
1160        }
1161        writel(reg, base + NvRegMIIControl);
1162
1163        if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1164                        NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1165                retval = -1;
1166        } else if (value != MII_READ) {
1167                /* it was a write operation - fewer failures are detectable */
1168                retval = 0;
1169        } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1170                retval = -1;
1171        } else {
1172                retval = readl(base + NvRegMIIData);
1173        }
1174
1175        return retval;
1176}
1177
1178static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1179{
1180        struct fe_priv *np = netdev_priv(dev);
1181        u32 miicontrol;
1182        unsigned int tries = 0;
1183
1184        miicontrol = BMCR_RESET | bmcr_setup;
1185        if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1186                return -1;
1187
1188        /* wait for 500ms */
1189        msleep(500);
1190
1191        /* must wait till reset is deasserted */
1192        while (miicontrol & BMCR_RESET) {
1193                usleep_range(10000, 20000);
1194                miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1195                /* FIXME: 100 tries seem excessive */
1196                if (tries++ > 100)
1197                        return -1;
1198        }
1199        return 0;
1200}
1201
1202static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1203{
1204        static const struct {
1205                int reg;
1206                int init;
1207        } ri[] = {
1208                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1209                { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1210                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1211                { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1212                { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1213                { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1214                { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1215        };
1216        int i;
1217
1218        for (i = 0; i < ARRAY_SIZE(ri); i++) {
1219                if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1220                        return PHY_ERROR;
1221        }
1222
1223        return 0;
1224}
1225
1226static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1227{
1228        u32 reg;
1229        u8 __iomem *base = get_hwbase(dev);
1230        u32 powerstate = readl(base + NvRegPowerState2);
1231
1232        /* need to perform hw phy reset */
1233        powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1234        writel(powerstate, base + NvRegPowerState2);
1235        msleep(25);
1236
1237        powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1238        writel(powerstate, base + NvRegPowerState2);
1239        msleep(25);
1240
1241        reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1242        reg |= PHY_REALTEK_INIT9;
1243        if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1244                return PHY_ERROR;
1245        if (mii_rw(dev, np->phyaddr,
1246                   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1247                return PHY_ERROR;
1248        reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1249        if (!(reg & PHY_REALTEK_INIT11)) {
1250                reg |= PHY_REALTEK_INIT11;
1251                if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1252                        return PHY_ERROR;
1253        }
1254        if (mii_rw(dev, np->phyaddr,
1255                   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1256                return PHY_ERROR;
1257
1258        return 0;
1259}
1260
1261static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1262{
1263        u32 phy_reserved;
1264
1265        if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1266                phy_reserved = mii_rw(dev, np->phyaddr,
1267                                      PHY_REALTEK_INIT_REG6, MII_READ);
1268                phy_reserved |= PHY_REALTEK_INIT7;
1269                if (mii_rw(dev, np->phyaddr,
1270                           PHY_REALTEK_INIT_REG6, phy_reserved))
1271                        return PHY_ERROR;
1272        }
1273
1274        return 0;
1275}
1276
1277static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1278{
1279        u32 phy_reserved;
1280
1281        if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1282                if (mii_rw(dev, np->phyaddr,
1283                           PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1284                        return PHY_ERROR;
1285                phy_reserved = mii_rw(dev, np->phyaddr,
1286                                      PHY_REALTEK_INIT_REG2, MII_READ);
1287                phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1288                phy_reserved |= PHY_REALTEK_INIT3;
1289                if (mii_rw(dev, np->phyaddr,
1290                           PHY_REALTEK_INIT_REG2, phy_reserved))
1291                        return PHY_ERROR;
1292                if (mii_rw(dev, np->phyaddr,
1293                           PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1294                        return PHY_ERROR;
1295        }
1296
1297        return 0;
1298}
1299
1300static int init_cicada(struct net_device *dev, struct fe_priv *np,
1301                       u32 phyinterface)
1302{
1303        u32 phy_reserved;
1304
1305        if (phyinterface & PHY_RGMII) {
1306                phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1307                phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1308                phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1309                if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1310                        return PHY_ERROR;
1311                phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1312                phy_reserved |= PHY_CICADA_INIT5;
1313                if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1314                        return PHY_ERROR;
1315        }
1316        phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1317        phy_reserved |= PHY_CICADA_INIT6;
1318        if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1319                return PHY_ERROR;
1320
1321        return 0;
1322}
1323
1324static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1325{
1326        u32 phy_reserved;
1327
1328        if (mii_rw(dev, np->phyaddr,
1329                   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1330                return PHY_ERROR;
1331        if (mii_rw(dev, np->phyaddr,
1332                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1333                return PHY_ERROR;
1334        phy_reserved = mii_rw(dev, np->phyaddr,
1335                              PHY_VITESSE_INIT_REG4, MII_READ);
1336        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1337                return PHY_ERROR;
1338        phy_reserved = mii_rw(dev, np->phyaddr,
1339                              PHY_VITESSE_INIT_REG3, MII_READ);
1340        phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1341        phy_reserved |= PHY_VITESSE_INIT3;
1342        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1343                return PHY_ERROR;
1344        if (mii_rw(dev, np->phyaddr,
1345                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1346                return PHY_ERROR;
1347        if (mii_rw(dev, np->phyaddr,
1348                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1349                return PHY_ERROR;
1350        phy_reserved = mii_rw(dev, np->phyaddr,
1351                              PHY_VITESSE_INIT_REG4, MII_READ);
1352        phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1353        phy_reserved |= PHY_VITESSE_INIT3;
1354        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1355                return PHY_ERROR;
1356        phy_reserved = mii_rw(dev, np->phyaddr,
1357                              PHY_VITESSE_INIT_REG3, MII_READ);
1358        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1359                return PHY_ERROR;
1360        if (mii_rw(dev, np->phyaddr,
1361                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1362                return PHY_ERROR;
1363        if (mii_rw(dev, np->phyaddr,
1364                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1365                return PHY_ERROR;
1366        phy_reserved = mii_rw(dev, np->phyaddr,
1367                              PHY_VITESSE_INIT_REG4, MII_READ);
1368        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1369                return PHY_ERROR;
1370        phy_reserved = mii_rw(dev, np->phyaddr,
1371                              PHY_VITESSE_INIT_REG3, MII_READ);
1372        phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1373        phy_reserved |= PHY_VITESSE_INIT8;
1374        if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1375                return PHY_ERROR;
1376        if (mii_rw(dev, np->phyaddr,
1377                   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1378                return PHY_ERROR;
1379        if (mii_rw(dev, np->phyaddr,
1380                   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1381                return PHY_ERROR;
1382
1383        return 0;
1384}
1385
1386static int phy_init(struct net_device *dev)
1387{
1388        struct fe_priv *np = get_nvpriv(dev);
1389        u8 __iomem *base = get_hwbase(dev);
1390        u32 phyinterface;
1391        u32 mii_status, mii_control, mii_control_1000, reg;
1392
1393        /* phy errata for E3016 phy */
1394        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1395                reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1396                reg &= ~PHY_MARVELL_E3016_INITMASK;
1397                if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1398                        netdev_info(dev, "%s: phy write to errata reg failed\n",
1399                                    pci_name(np->pci_dev));
1400                        return PHY_ERROR;
1401                }
1402        }
1403        if (np->phy_oui == PHY_OUI_REALTEK) {
1404                if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1405                    np->phy_rev == PHY_REV_REALTEK_8211B) {
1406                        if (init_realtek_8211b(dev, np)) {
1407                                netdev_info(dev, "%s: phy init failed\n",
1408                                            pci_name(np->pci_dev));
1409                                return PHY_ERROR;
1410                        }
1411                } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1412                           np->phy_rev == PHY_REV_REALTEK_8211C) {
1413                        if (init_realtek_8211c(dev, np)) {
1414                                netdev_info(dev, "%s: phy init failed\n",
1415                                            pci_name(np->pci_dev));
1416                                return PHY_ERROR;
1417                        }
1418                } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1419                        if (init_realtek_8201(dev, np)) {
1420                                netdev_info(dev, "%s: phy init failed\n",
1421                                            pci_name(np->pci_dev));
1422                                return PHY_ERROR;
1423                        }
1424                }
1425        }
1426
1427        /* set advertise register */
1428        reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1429        reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430                ADVERTISE_100HALF | ADVERTISE_100FULL |
1431                ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1432        if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1433                netdev_info(dev, "%s: phy write to advertise failed\n",
1434                            pci_name(np->pci_dev));
1435                return PHY_ERROR;
1436        }
1437
1438        /* get phy interface type */
1439        phyinterface = readl(base + NvRegPhyInterface);
1440
1441        /* see if gigabit phy */
1442        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1443        if (mii_status & PHY_GIGABIT) {
1444                np->gigabit = PHY_GIGABIT;
1445                mii_control_1000 = mii_rw(dev, np->phyaddr,
1446                                          MII_CTRL1000, MII_READ);
1447                mii_control_1000 &= ~ADVERTISE_1000HALF;
1448                if (phyinterface & PHY_RGMII)
1449                        mii_control_1000 |= ADVERTISE_1000FULL;
1450                else
1451                        mii_control_1000 &= ~ADVERTISE_1000FULL;
1452
1453                if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1454                        netdev_info(dev, "%s: phy init failed\n",
1455                                    pci_name(np->pci_dev));
1456                        return PHY_ERROR;
1457                }
1458        } else
1459                np->gigabit = 0;
1460
1461        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1462        mii_control |= BMCR_ANENABLE;
1463
1464        if (np->phy_oui == PHY_OUI_REALTEK &&
1465            np->phy_model == PHY_MODEL_REALTEK_8211 &&
1466            np->phy_rev == PHY_REV_REALTEK_8211C) {
1467                /* start autoneg since we already performed hw reset above */
1468                mii_control |= BMCR_ANRESTART;
1469                if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1470                        netdev_info(dev, "%s: phy init failed\n",
1471                                    pci_name(np->pci_dev));
1472                        return PHY_ERROR;
1473                }
1474        } else {
1475                /* reset the phy
1476                 * (certain phys need bmcr to be set up with the reset)
1477                 */
1478                if (phy_reset(dev, mii_control)) {
1479                        netdev_info(dev, "%s: phy reset failed\n",
1480                                    pci_name(np->pci_dev));
1481                        return PHY_ERROR;
1482                }
1483        }
1484
1485        /* phy vendor specific configuration */
1486        if (np->phy_oui == PHY_OUI_CICADA) {
1487                if (init_cicada(dev, np, phyinterface)) {
1488                        netdev_info(dev, "%s: phy init failed\n",
1489                                    pci_name(np->pci_dev));
1490                        return PHY_ERROR;
1491                }
1492        } else if (np->phy_oui == PHY_OUI_VITESSE) {
1493                if (init_vitesse(dev, np)) {
1494                        netdev_info(dev, "%s: phy init failed\n",
1495                                    pci_name(np->pci_dev));
1496                        return PHY_ERROR;
1497                }
1498        } else if (np->phy_oui == PHY_OUI_REALTEK) {
1499                if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1500                    np->phy_rev == PHY_REV_REALTEK_8211B) {
1501                        /* reset could have cleared these out, set them back */
1502                        if (init_realtek_8211b(dev, np)) {
1503                                netdev_info(dev, "%s: phy init failed\n",
1504                                            pci_name(np->pci_dev));
1505                                return PHY_ERROR;
1506                        }
1507                } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1508                        if (init_realtek_8201(dev, np) ||
1509                            init_realtek_8201_cross(dev, np)) {
1510                                netdev_info(dev, "%s: phy init failed\n",
1511                                            pci_name(np->pci_dev));
1512                                return PHY_ERROR;
1513                        }
1514                }
1515        }
1516
1517        /* some phys clear out pause advertisement on reset, set it back */
1518        mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1519
1520        /* restart auto negotiation; optionally power down phy */
1521        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1522        mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1523        if (phy_power_down)
1524                mii_control |= BMCR_PDOWN;
1525        if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1526                return PHY_ERROR;
1527
1528        return 0;
1529}
1530
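/*
 * When np->mac_in_use is set, a management unit shares the MAC: in that
 * case nv_stop_rx/nv_stop_tx leave the START bits set and the helpers
 * below toggle only the RX/TX path-enable bits, so the management unit
 * keeps a working MAC.
 */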
1531static void nv_start_rx(struct net_device *dev)
1532{
1533        struct fe_priv *np = netdev_priv(dev);
1534        u8 __iomem *base = get_hwbase(dev);
1535        u32 rx_ctrl = readl(base + NvRegReceiverControl);
1536
1537        /* Already running? Stop it. */
1538        if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1539                rx_ctrl &= ~NVREG_RCVCTL_START;
1540                writel(rx_ctrl, base + NvRegReceiverControl);
1541                pci_push(base);
1542        }
1543        writel(np->linkspeed, base + NvRegLinkSpeed);
1544        pci_push(base);
1545        rx_ctrl |= NVREG_RCVCTL_START;
1546        if (np->mac_in_use)
1547                rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1548        writel(rx_ctrl, base + NvRegReceiverControl);
1549        pci_push(base);
1550}
1551
1552static void nv_stop_rx(struct net_device *dev)
1553{
1554        struct fe_priv *np = netdev_priv(dev);
1555        u8 __iomem *base = get_hwbase(dev);
1556        u32 rx_ctrl = readl(base + NvRegReceiverControl);
1557
1558        if (!np->mac_in_use)
1559                rx_ctrl &= ~NVREG_RCVCTL_START;
1560        else
1561                rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1562        writel(rx_ctrl, base + NvRegReceiverControl);
1563        if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1564                      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1565                netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1566                            __func__);
1567
1568        udelay(NV_RXSTOP_DELAY2);
1569        if (!np->mac_in_use)
1570                writel(0, base + NvRegLinkSpeed);
1571}
1572
1573static void nv_start_tx(struct net_device *dev)
1574{
1575        struct fe_priv *np = netdev_priv(dev);
1576        u8 __iomem *base = get_hwbase(dev);
1577        u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578
1579        tx_ctrl |= NVREG_XMITCTL_START;
1580        if (np->mac_in_use)
1581                tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1582        writel(tx_ctrl, base + NvRegTransmitterControl);
1583        pci_push(base);
1584}
1585
1586static void nv_stop_tx(struct net_device *dev)
1587{
1588        struct fe_priv *np = netdev_priv(dev);
1589        u8 __iomem *base = get_hwbase(dev);
1590        u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1591
1592        if (!np->mac_in_use)
1593                tx_ctrl &= ~NVREG_XMITCTL_START;
1594        else
1595                tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1596        writel(tx_ctrl, base + NvRegTransmitterControl);
1597        if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1598                      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1599                netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1600                            __func__);
1601
1602        udelay(NV_TXSTOP_DELAY2);
1603        if (!np->mac_in_use)
1604                writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1605                       base + NvRegTransmitPoll);
1606}
1607
1608static void nv_start_rxtx(struct net_device *dev)
1609{
1610        nv_start_rx(dev);
1611        nv_start_tx(dev);
1612}
1613
1614static void nv_stop_rxtx(struct net_device *dev)
1615{
1616        nv_stop_rx(dev);
1617        nv_stop_tx(dev);
1618}
1619
1620static void nv_txrx_reset(struct net_device *dev)
1621{
1622        struct fe_priv *np = netdev_priv(dev);
1623        u8 __iomem *base = get_hwbase(dev);
1624
1625        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1626        pci_push(base);
1627        udelay(NV_TXRX_RESET_DELAY);
1628        writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1629        pci_push(base);
1630}
1631
1632static void nv_mac_reset(struct net_device *dev)
1633{
1634        struct fe_priv *np = netdev_priv(dev);
1635        u8 __iomem *base = get_hwbase(dev);
1636        u32 temp1, temp2, temp3;
1637
1638        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1639        pci_push(base);
1640
1641        /* save registers since they will be cleared on reset */
1642        temp1 = readl(base + NvRegMacAddrA);
1643        temp2 = readl(base + NvRegMacAddrB);
1644        temp3 = readl(base + NvRegTransmitPoll);
1645
1646        writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1647        pci_push(base);
1648        udelay(NV_MAC_RESET_DELAY);
1649        writel(0, base + NvRegMacReset);
1650        pci_push(base);
1651        udelay(NV_MAC_RESET_DELAY);
1652
1653        /* restore saved registers */
1654        writel(temp1, base + NvRegMacAddrA);
1655        writel(temp2, base + NvRegMacAddrB);
1656        writel(temp3, base + NvRegTransmitPoll);
1657
1658        writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1659        pci_push(base);
1660}
1661
1662/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1663static void nv_update_stats(struct net_device *dev)
1664{
1665        struct fe_priv *np = netdev_priv(dev);
1666        u8 __iomem *base = get_hwbase(dev);
1667
1668        lockdep_assert_held(&np->hwstats_lock);
1669
1670        /* query hardware */
1671        np->estats.tx_bytes += readl(base + NvRegTxCnt);
1672        np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1673        np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1674        np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1675        np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1676        np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1677        np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1678        np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1679        np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1680        np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1681        np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1682        np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1683        np->estats.rx_runt += readl(base + NvRegRxRunt);
1684        np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1685        np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1686        np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1687        np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1688        np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1689        np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1690        np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1691        np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1692        np->estats.rx_packets =
1693                np->estats.rx_unicast +
1694                np->estats.rx_multicast +
1695                np->estats.rx_broadcast;
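        /* rx_extra_byte is subtracted below on the assumption that the
         * hardware also counts each extra-byte event as a frame
         * alignment error, which would otherwise be counted twice.
         */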
1696        np->estats.rx_errors_total =
1697                np->estats.rx_crc_errors +
1698                np->estats.rx_over_errors +
1699                np->estats.rx_frame_error +
1700                (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1701                np->estats.rx_late_collision +
1702                np->estats.rx_runt +
1703                np->estats.rx_frame_too_long;
1704        np->estats.tx_errors_total =
1705                np->estats.tx_late_collision +
1706                np->estats.tx_fifo_errors +
1707                np->estats.tx_carrier_errors +
1708                np->estats.tx_excess_deferral +
1709                np->estats.tx_retry_error;
1710
1711        if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1712                np->estats.tx_deferral += readl(base + NvRegTxDef);
1713                np->estats.tx_packets += readl(base + NvRegTxFrame);
1714                np->estats.rx_bytes += readl(base + NvRegRxCnt);
1715                np->estats.tx_pause += readl(base + NvRegTxPause);
1716                np->estats.rx_pause += readl(base + NvRegRxPause);
1717                np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1718                np->estats.rx_errors_total += np->estats.rx_drop_frame;
1719        }
1720
1721        if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1722                np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1723                np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1724                np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1725        }
1726}
1727
1728static void nv_get_stats(int cpu, struct fe_priv *np,
1729                         struct rtnl_link_stats64 *storage)
1730{
1731        struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
1732        unsigned int syncp_start;
1733        u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
1734        u64 tx_packets, tx_bytes, tx_dropped;
1735
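        /* Snapshot the per-cpu counters under the u64_stats seqcount;
         * the fetch loop retries if a writer updated the stats meanwhile,
         * so we never return a torn 64-bit value on 32-bit hosts.
         */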
1736        do {
1737                syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1738                rx_packets       = src->stat_rx_packets;
1739                rx_bytes         = src->stat_rx_bytes;
1740                rx_dropped       = src->stat_rx_dropped;
1741                rx_missed_errors = src->stat_rx_missed_errors;
1742        } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1743
1744        storage->rx_packets       += rx_packets;
1745        storage->rx_bytes         += rx_bytes;
1746        storage->rx_dropped       += rx_dropped;
1747        storage->rx_missed_errors += rx_missed_errors;
1748
1749        do {
1750                syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1751                tx_packets  = src->stat_tx_packets;
1752                tx_bytes    = src->stat_tx_bytes;
1753                tx_dropped  = src->stat_tx_dropped;
1754        } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1755
1756        storage->tx_packets += tx_packets;
1757        storage->tx_bytes   += tx_bytes;
1758        storage->tx_dropped += tx_dropped;
1759}
1760
1761/*
1762 * nv_get_stats64: dev->ndo_get_stats64 function
1763 * Get latest stats value from the nic.
1764 * Called with read_lock(&dev_base_lock) held for read;
1765 * only synchronized against unregister_netdevice.
1766 */
1767static void
1768nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1769        __acquires(&netdev_priv(dev)->hwstats_lock)
1770        __releases(&netdev_priv(dev)->hwstats_lock)
1771{
1772        struct fe_priv *np = netdev_priv(dev);
1773        int cpu;
1774
1775        /*
1776         * Note: because HW stats are not always available and for
1777         * consistency reasons, the following ifconfig stats are
1778         * managed by software: rx_bytes, tx_bytes, rx_packets and
1779         * tx_packets. The related hardware stats reported by ethtool
1780         * should be equivalent to these ifconfig stats, with 4
1781         * additional bytes per packet (Ethernet FCS CRC), except for
1782         * tx_packets when TSO kicks in.
1783         */
1784
1785        /* software stats */
1786        for_each_online_cpu(cpu)
1787                nv_get_stats(cpu, np, storage);
1788
1789        /* If the nic supports hw counters then retrieve latest values */
1790        if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1791                spin_lock_bh(&np->hwstats_lock);
1792
1793                nv_update_stats(dev);
1794
1795                /* generic stats */
1796                storage->rx_errors = np->estats.rx_errors_total;
1797                storage->tx_errors = np->estats.tx_errors_total;
1798
1799                /* meaningful only when NIC supports stats v3 */
1800                storage->multicast = np->estats.rx_multicast;
1801
1802                /* detailed rx_errors */
1803                storage->rx_length_errors = np->estats.rx_length_error;
1804                storage->rx_over_errors   = np->estats.rx_over_errors;
1805                storage->rx_crc_errors    = np->estats.rx_crc_errors;
1806                storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1807                storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1808
1809                /* detailed tx_errors */
1810                storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1811                storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1812
1813                spin_unlock_bh(&np->hwstats_lock);
1814        }
1815}
1816
1817/*
1818 * nv_alloc_rx: fill rx ring entries.
1819 * Return 1 if an skb allocation failed and the rx engine is left
1820 * without available descriptors.
1821 */
1822static int nv_alloc_rx(struct net_device *dev)
1823{
1824        struct fe_priv *np = netdev_priv(dev);
1825        struct ring_desc *less_rx;
1826
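        /* Refill up to, but not including, the slot just before get_rx:
         * keeping one descriptor unused lets a full ring be told apart
         * from an empty one without extra state.
         */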
1827        less_rx = np->get_rx.orig;
1828        if (less_rx-- == np->rx_ring.orig)
1829                less_rx = np->last_rx.orig;
1830
1831        while (np->put_rx.orig != less_rx) {
1832                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1833                if (likely(skb)) {
1834                        np->put_rx_ctx->skb = skb;
1835                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1836                                                             skb->data,
1837                                                             skb_tailroom(skb),
1838                                                             DMA_FROM_DEVICE);
1839                        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1840                                                       np->put_rx_ctx->dma))) {
1841                                kfree_skb(skb);
1842                                goto packet_dropped;
1843                        }
1844                        np->put_rx_ctx->dma_len = skb_tailroom(skb);
1845                        np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1846                        wmb();
1847                        np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1848                        if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1849                                np->put_rx.orig = np->rx_ring.orig;
1850                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1851                                np->put_rx_ctx = np->rx_skb;
1852                } else {
1853packet_dropped:
1854                        u64_stats_update_begin(&np->swstats_rx_syncp);
1855                        nv_txrx_stats_inc(stat_rx_dropped);
1856                        u64_stats_update_end(&np->swstats_rx_syncp);
1857                        return 1;
1858                }
1859        }
1860        return 0;
1861}
1862
1863static int nv_alloc_rx_optimized(struct net_device *dev)
1864{
1865        struct fe_priv *np = netdev_priv(dev);
1866        struct ring_desc_ex *less_rx;
1867
1868        less_rx = np->get_rx.ex;
1869        if (less_rx-- == np->rx_ring.ex)
1870                less_rx = np->last_rx.ex;
1871
1872        while (np->put_rx.ex != less_rx) {
1873                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1874                if (likely(skb)) {
1875                        np->put_rx_ctx->skb = skb;
1876                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1877                                                             skb->data,
1878                                                             skb_tailroom(skb),
1879                                                             DMA_FROM_DEVICE);
1880                        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1881                                                       np->put_rx_ctx->dma))) {
1882                                kfree_skb(skb);
1883                                goto packet_dropped;
1884                        }
1885                        np->put_rx_ctx->dma_len = skb_tailroom(skb);
1886                        np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1887                        np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1888                        wmb();
1889                        np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1890                        if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1891                                np->put_rx.ex = np->rx_ring.ex;
1892                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1893                                np->put_rx_ctx = np->rx_skb;
1894                } else {
1895packet_dropped:
1896                        u64_stats_update_begin(&np->swstats_rx_syncp);
1897                        nv_txrx_stats_inc(stat_rx_dropped);
1898                        u64_stats_update_end(&np->swstats_rx_syncp);
1899                        return 1;
1900                }
1901        }
1902        return 0;
1903}
1904
1905/* If rx buffers are exhausted, this timer fires after 50ms to retry the refill */
1906static void nv_do_rx_refill(struct timer_list *t)
1907{
1908        struct fe_priv *np = from_timer(np, t, oom_kick);
1909
1910        /* Just reschedule NAPI rx processing */
1911        napi_schedule(&np->napi);
1912}
1913
1914static void nv_init_rx(struct net_device *dev)
1915{
1916        struct fe_priv *np = netdev_priv(dev);
1917        int i;
1918
1919        np->get_rx = np->rx_ring;
1920        np->put_rx = np->rx_ring;
1921
1922        if (!nv_optimized(np))
1923                np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1924        else
1925                np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1926        np->get_rx_ctx = np->rx_skb;
1927        np->put_rx_ctx = np->rx_skb;
1928        np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1929
1930        for (i = 0; i < np->rx_ring_size; i++) {
1931                if (!nv_optimized(np)) {
1932                        np->rx_ring.orig[i].flaglen = 0;
1933                        np->rx_ring.orig[i].buf = 0;
1934                } else {
1935                        np->rx_ring.ex[i].flaglen = 0;
1936                        np->rx_ring.ex[i].txvlan = 0;
1937                        np->rx_ring.ex[i].bufhigh = 0;
1938                        np->rx_ring.ex[i].buflow = 0;
1939                }
1940                np->rx_skb[i].skb = NULL;
1941                np->rx_skb[i].dma = 0;
1942        }
1943}
1944
1945static void nv_init_tx(struct net_device *dev)
1946{
1947        struct fe_priv *np = netdev_priv(dev);
1948        int i;
1949
1950        np->get_tx = np->tx_ring;
1951        np->put_tx = np->tx_ring;
1952
1953        if (!nv_optimized(np))
1954                np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1955        else
1956                np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1957        np->get_tx_ctx = np->tx_skb;
1958        np->put_tx_ctx = np->tx_skb;
1959        np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1960        netdev_reset_queue(np->dev);
1961        np->tx_pkts_in_progress = 0;
1962        np->tx_change_owner = NULL;
1963        np->tx_end_flip = NULL;
1964        np->tx_stop = 0;
1965
1966        for (i = 0; i < np->tx_ring_size; i++) {
1967                if (!nv_optimized(np)) {
1968                        np->tx_ring.orig[i].flaglen = 0;
1969                        np->tx_ring.orig[i].buf = 0;
1970                } else {
1971                        np->tx_ring.ex[i].flaglen = 0;
1972                        np->tx_ring.ex[i].txvlan = 0;
1973                        np->tx_ring.ex[i].bufhigh = 0;
1974                        np->tx_ring.ex[i].buflow = 0;
1975                }
1976                np->tx_skb[i].skb = NULL;
1977                np->tx_skb[i].dma = 0;
1978                np->tx_skb[i].dma_len = 0;
1979                np->tx_skb[i].dma_single = 0;
1980                np->tx_skb[i].first_tx_desc = NULL;
1981                np->tx_skb[i].next_tx_ctx = NULL;
1982        }
1983}
1984
1985static int nv_init_ring(struct net_device *dev)
1986{
1987        struct fe_priv *np = netdev_priv(dev);
1988
1989        nv_init_tx(dev);
1990        nv_init_rx(dev);
1991
1992        if (!nv_optimized(np))
1993                return nv_alloc_rx(dev);
1994        else
1995                return nv_alloc_rx_optimized(dev);
1996}
1997
1998static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1999{
2000        if (tx_skb->dma) {
2001                if (tx_skb->dma_single)
2002                        dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
2003                                         tx_skb->dma_len,
2004                                         DMA_TO_DEVICE);
2005                else
2006                        dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
2007                                       tx_skb->dma_len,
2008                                       DMA_TO_DEVICE);
2009                tx_skb->dma = 0;
2010        }
2011}
2012
2013static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
2014{
2015        nv_unmap_txskb(np, tx_skb);
2016        if (tx_skb->skb) {
2017                dev_kfree_skb_any(tx_skb->skb);
2018                tx_skb->skb = NULL;
2019                return 1;
2020        }
2021        return 0;
2022}
2023
2024static void nv_drain_tx(struct net_device *dev)
2025{
2026        struct fe_priv *np = netdev_priv(dev);
2027        unsigned int i;
2028
2029        for (i = 0; i < np->tx_ring_size; i++) {
2030                if (!nv_optimized(np)) {
2031                        np->tx_ring.orig[i].flaglen = 0;
2032                        np->tx_ring.orig[i].buf = 0;
2033                } else {
2034                        np->tx_ring.ex[i].flaglen = 0;
2035                        np->tx_ring.ex[i].txvlan = 0;
2036                        np->tx_ring.ex[i].bufhigh = 0;
2037                        np->tx_ring.ex[i].buflow = 0;
2038                }
2039                if (nv_release_txskb(np, &np->tx_skb[i])) {
2040                        u64_stats_update_begin(&np->swstats_tx_syncp);
2041                        nv_txrx_stats_inc(stat_tx_dropped);
2042                        u64_stats_update_end(&np->swstats_tx_syncp);
2043                }
2044                np->tx_skb[i].dma = 0;
2045                np->tx_skb[i].dma_len = 0;
2046                np->tx_skb[i].dma_single = 0;
2047                np->tx_skb[i].first_tx_desc = NULL;
2048                np->tx_skb[i].next_tx_ctx = NULL;
2049        }
2050        np->tx_pkts_in_progress = 0;
2051        np->tx_change_owner = NULL;
2052        np->tx_end_flip = NULL;
2053}
2054
2055static void nv_drain_rx(struct net_device *dev)
2056{
2057        struct fe_priv *np = netdev_priv(dev);
2058        int i;
2059
2060        for (i = 0; i < np->rx_ring_size; i++) {
2061                if (!nv_optimized(np)) {
2062                        np->rx_ring.orig[i].flaglen = 0;
2063                        np->rx_ring.orig[i].buf = 0;
2064                } else {
2065                        np->rx_ring.ex[i].flaglen = 0;
2066                        np->rx_ring.ex[i].txvlan = 0;
2067                        np->rx_ring.ex[i].bufhigh = 0;
2068                        np->rx_ring.ex[i].buflow = 0;
2069                }
2070                wmb();
2071                if (np->rx_skb[i].skb) {
2072                        dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
2073                                         (skb_end_pointer(np->rx_skb[i].skb) -
2074                                         np->rx_skb[i].skb->data),
2075                                         DMA_FROM_DEVICE);
2076                        dev_kfree_skb(np->rx_skb[i].skb);
2077                        np->rx_skb[i].skb = NULL;
2078                }
2079        }
2080}
2081
2082static void nv_drain_rxtx(struct net_device *dev)
2083{
2084        nv_drain_tx(dev);
2085        nv_drain_rx(dev);
2086}
2087
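/*
 * Free tx slots = ring size minus in-flight descriptors. The context
 * pointer difference (put_tx_ctx - get_tx_ctx) may be negative after a
 * wrap, so bias by tx_ring_size before taking the modulus: e.g. with a
 * 16 entry ring, put at 2 and get at 14, 16 - ((16 + (2 - 14)) % 16)
 * yields 12 free slots.
 */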
2088static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2089{
2090        return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2091}
2092
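/*
 * Reseed the random portion of NvRegSlotTime, which drives the legacy
 * (non-gear) collision backoff logic; the transmitter must be stopped
 * while the register is rewritten.
 */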
2093static void nv_legacybackoff_reseed(struct net_device *dev)
2094{
2095        u8 __iomem *base = get_hwbase(dev);
2096        u32 reg;
2097        u32 low;
2098        int tx_status = 0;
2099
2100        reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2101        get_random_bytes(&low, sizeof(low));
2102        reg |= low & NVREG_SLOTTIME_MASK;
2103
2104        /* Need to stop tx before the change takes effect.
2105         * Caller has already acquired np->lock.
2106         */
2107        tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2108        if (tx_status)
2109                nv_stop_tx(dev);
2110        nv_stop_rx(dev);
2111        writel(reg, base + NvRegSlotTime);
2112        if (tx_status)
2113                nv_start_tx(dev);
2114        nv_start_rx(dev);
2115}
2116
2117/* Gear Backoff Seeds */
2118#define BACKOFF_SEEDSET_ROWS    8
2119#define BACKOFF_SEEDSET_LFSRS   15
2120
2121/* Known Good seed sets */
2122static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2123        {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2124        {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2125        {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2126        {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2127        {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2128        {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2129        {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2130        {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2131
2132static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2133        {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2134        {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2135        {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2136        {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2137        {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2138        {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2139        {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2140        {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2141
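/*
 * Each row above is one known-good set of 15 seeds, one per gear LFSR;
 * nv_gear_backoff_reseed() picks a row at random and programs it
 * alongside the free running LFSR seed.
 */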
2142static void nv_gear_backoff_reseed(struct net_device *dev)
2143{
2144        u8 __iomem *base = get_hwbase(dev);
2145        u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2146        u32 temp, seedset, combinedSeed;
2147        int i;
2148
2149        /* Set up the seed for the free running LFSR */
2150        /* We draw three random 12-bit miniseeds and swizzle
2151           their bits around to increase randomness */
2152        get_random_bytes(&miniseed1, sizeof(miniseed1));
2153        miniseed1 &= 0x0fff;
2154        if (miniseed1 == 0)
2155                miniseed1 = 0xabc;
2156
2157        get_random_bytes(&miniseed2, sizeof(miniseed2));
2158        miniseed2 &= 0x0fff;
2159        if (miniseed2 == 0)
2160                miniseed2 = 0xabc;
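        /* Swap the top and bottom nibbles of the 12-bit miniseed,
         * e.g. 0x123 becomes 0x321, so the XOR below mixes different
         * bit positions of the two values.
         */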
2161        miniseed2_reversed =
2162                ((miniseed2 & 0xF00) >> 8) |
2163                 (miniseed2 & 0x0F0) |
2164                 ((miniseed2 & 0x00F) << 8);
2165
2166        get_random_bytes(&miniseed3, sizeof(miniseed3));
2167        miniseed3 &= 0x0fff;
2168        if (miniseed3 == 0)
2169                miniseed3 = 0xabc;
2170        miniseed3_reversed =
2171                ((miniseed3 & 0xF00) >> 8) |
2172                 (miniseed3 & 0x0F0) |
2173                 ((miniseed3 & 0x00F) << 8);
2174
2175        combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2176                       (miniseed2 ^ miniseed3_reversed);
2177
2178        /* Seeds cannot be zero */
2179        if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2180                combinedSeed |= 0x08;
2181        if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2182                combinedSeed |= 0x8000;
2183
2184        /* No need to disable tx here */
2185        temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2186        temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2187        temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2188        writel(temp, base + NvRegBackOffControl);
2189
2190        /* Set up seeds for all gear LFSRs. */
2191        get_random_bytes(&seedset, sizeof(seedset));
2192        seedset = seedset % BACKOFF_SEEDSET_ROWS;
2193        for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2194                temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2195                temp |= main_seedset[seedset][i-1] & 0x3ff;
2196                temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2197                writel(temp, base + NvRegBackOffControl);
2198        }
2199}
2200
2201/*
2202 * nv_start_xmit: dev->hard_start_xmit function
2203 * Called with netif_tx_lock held.
2204 */
2205static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2206{
2207        struct fe_priv *np = netdev_priv(dev);
2208        u32 tx_flags = 0;
2209        u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2210        unsigned int fragments = skb_shinfo(skb)->nr_frags;
2211        unsigned int i;
2212        u32 offset = 0;
2213        u32 bcnt;
2214        u32 size = skb_headlen(skb);
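        /* Descriptors for the linear area: ceil(size / NV_TX2_TSO_MAX_SIZE),
         * as one descriptor carries at most NV_TX2_TSO_MAX_SIZE bytes.
         */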
2215        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2216        u32 empty_slots;
2217        struct ring_desc *put_tx;
2218        struct ring_desc *start_tx;
2219        struct ring_desc *prev_tx;
2220        struct nv_skb_map *prev_tx_ctx;
2221        struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2222        unsigned long flags;
2223        netdev_tx_t ret = NETDEV_TX_OK;
2224
2225        /* add fragments to entries count */
2226        for (i = 0; i < fragments; i++) {
2227                u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2228
2229                entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2230                           ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2231        }
2232
2233        spin_lock_irqsave(&np->lock, flags);
2234        empty_slots = nv_get_empty_tx_slots(np);
2235        if (unlikely(empty_slots <= entries)) {
2236                netif_stop_queue(dev);
2237                np->tx_stop = 1;
2238                spin_unlock_irqrestore(&np->lock, flags);
2239
2240                /* The ring is full; packets already queued (including
2241                 * xmit_more batches) still need the tx kick below.
2242                 */
2243                ret = NETDEV_TX_BUSY;
2244                goto txkick;
2245        }
2246        spin_unlock_irqrestore(&np->lock, flags);
2247
2248        start_tx = put_tx = np->put_tx.orig;
2249
2250        /* set up the header buffer */
2251        do {
2252                bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2253                np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2254                                                     skb->data + offset, bcnt,
2255                                                     DMA_TO_DEVICE);
2256                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2257                                               np->put_tx_ctx->dma))) {
2258                        /* on DMA mapping error, drop the packet */
2259                        dev_kfree_skb_any(skb);
2260                        u64_stats_update_begin(&np->swstats_tx_syncp);
2261                        nv_txrx_stats_inc(stat_tx_dropped);
2262                        u64_stats_update_end(&np->swstats_tx_syncp);
2263
2264                        ret = NETDEV_TX_OK;
2265
2266                        goto dma_error;
2267                }
2268                np->put_tx_ctx->dma_len = bcnt;
2269                np->put_tx_ctx->dma_single = 1;
2270                put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2271                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2272
2273                tx_flags = np->tx_flags;
2274                offset += bcnt;
2275                size -= bcnt;
2276                if (unlikely(put_tx++ == np->last_tx.orig))
2277                        put_tx = np->tx_ring.orig;
2278                if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2279                        np->put_tx_ctx = np->tx_skb;
2280        } while (size);
2281
2282        /* set up the fragments */
2283        for (i = 0; i < fragments; i++) {
2284                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2285                u32 frag_size = skb_frag_size(frag);
2286                offset = 0;
2287
2288                do {
2289                        if (!start_tx_ctx)
2290                                start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2291
2292                        bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2293                        np->put_tx_ctx->dma = skb_frag_dma_map(
2294                                                        &np->pci_dev->dev,
2295                                                        frag, offset,
2296                                                        bcnt,
2297                                                        DMA_TO_DEVICE);
2298                        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2299                                                       np->put_tx_ctx->dma))) {
2300
2301                                /* Unwind the mapped fragments */
2302                                do {
2303                                        nv_unmap_txskb(np, start_tx_ctx);
2304                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2305                                                tmp_tx_ctx = np->tx_skb;
2306                                } while (tmp_tx_ctx != np->put_tx_ctx);
2307                                dev_kfree_skb_any(skb);
2308                                np->put_tx_ctx = start_tx_ctx;
2309                                u64_stats_update_begin(&np->swstats_tx_syncp);
2310                                nv_txrx_stats_inc(stat_tx_dropped);
2311                                u64_stats_update_end(&np->swstats_tx_syncp);
2312
2313                                ret = NETDEV_TX_OK;
2314
2315                                goto dma_error;
2316                        }
2317
2318                        np->put_tx_ctx->dma_len = bcnt;
2319                        np->put_tx_ctx->dma_single = 0;
2320                        put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2321                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2322
2323                        offset += bcnt;
2324                        frag_size -= bcnt;
2325                        if (unlikely(put_tx++ == np->last_tx.orig))
2326                                put_tx = np->tx_ring.orig;
2327                        if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2328                                np->put_tx_ctx = np->tx_skb;
2329                } while (frag_size);
2330        }
2331
2332        if (unlikely(put_tx == np->tx_ring.orig))
2333                prev_tx = np->last_tx.orig;
2334        else
2335                prev_tx = put_tx - 1;
2336
2337        if (unlikely(np->put_tx_ctx == np->tx_skb))
2338                prev_tx_ctx = np->last_tx_ctx;
2339        else
2340                prev_tx_ctx = np->put_tx_ctx - 1;
2341
2342        /* set last fragment flag */
2343        prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2344
2345        /* save skb in this slot's context area */
2346        prev_tx_ctx->skb = skb;
2347
2348        if (skb_is_gso(skb))
2349                tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2350        else
2351                tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2352                         NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2353
2354        spin_lock_irqsave(&np->lock, flags);
2355
2356        /* set tx flags */
2357        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2358
2359        netdev_sent_queue(np->dev, skb->len);
2360
2361        skb_tx_timestamp(skb);
2362
2363        np->put_tx.orig = put_tx;
2364
2365        spin_unlock_irqrestore(&np->lock, flags);
2366
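        /*
         * Both exits converge here: the normal path kicks the NIC only if
         * the queue is stopped or no xmit_more packets are pending, while
         * the dma_error path jumps directly to the kick so descriptors
         * queued by earlier calls still go out.
         */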
2367txkick:
2368        if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
2369                u32 txrxctl_kick;
2370dma_error:
2371                txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
2372                writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2373        }
2374
2375        return ret;
2376}
2377
2378static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2379                                           struct net_device *dev)
2380{
2381        struct fe_priv *np = netdev_priv(dev);
2382        u32 tx_flags = 0;
2383        u32 tx_flags_extra;
2384        unsigned int fragments = skb_shinfo(skb)->nr_frags;
2385        unsigned int i;
2386        u32 offset = 0;
2387        u32 bcnt;
2388        u32 size = skb_headlen(skb);
2389        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2390        u32 empty_slots;
2391        struct ring_desc_ex *put_tx;
2392        struct ring_desc_ex *start_tx;
2393        struct ring_desc_ex *prev_tx;
2394        struct nv_skb_map *prev_tx_ctx;
2395        struct nv_skb_map *start_tx_ctx = NULL;
2396        struct nv_skb_map *tmp_tx_ctx = NULL;
2397        unsigned long flags;
2398        netdev_tx_t ret = NETDEV_TX_OK;
2399
2400        /* add fragments to entries count */
2401        for (i = 0; i < fragments; i++) {
2402                u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2403
2404                entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2405                           ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2406        }
2407
2408        spin_lock_irqsave(&np->lock, flags);
2409        empty_slots = nv_get_empty_tx_slots(np);
2410        if (unlikely(empty_slots <= entries)) {
2411                netif_stop_queue(dev);
2412                np->tx_stop = 1;
2413                spin_unlock_irqrestore(&np->lock, flags);
2414
2415                /* The ring is full; packets already queued (including
2416                 * xmit_more batches) still need the tx kick below.
2417                 */
2418                ret = NETDEV_TX_BUSY;
2419
2420                goto txkick;
2421        }
2422        spin_unlock_irqrestore(&np->lock, flags);
2423
2424        start_tx = put_tx = np->put_tx.ex;
2425        start_tx_ctx = np->put_tx_ctx;
2426
2427        /* set up the header buffer */
2428        do {
2429                bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2430                np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2431                                                     skb->data + offset, bcnt,
2432                                                     DMA_TO_DEVICE);
2433                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2434                                               np->put_tx_ctx->dma))) {
2435                        /* on DMA mapping error, drop the packet */
2436                        dev_kfree_skb_any(skb);
2437                        u64_stats_update_begin(&np->swstats_tx_syncp);
2438                        nv_txrx_stats_inc(stat_tx_dropped);
2439                        u64_stats_update_end(&np->swstats_tx_syncp);
2440
2441                        ret = NETDEV_TX_OK;
2442
2443                        goto dma_error;
2444                }
2445                np->put_tx_ctx->dma_len = bcnt;
2446                np->put_tx_ctx->dma_single = 1;
2447                put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2448                put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2449                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2450
2451                tx_flags = NV_TX2_VALID;
2452                offset += bcnt;
2453                size -= bcnt;
2454                if (unlikely(put_tx++ == np->last_tx.ex))
2455                        put_tx = np->tx_ring.ex;
2456                if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2457                        np->put_tx_ctx = np->tx_skb;
2458        } while (size);
2459
2460        /* set up the fragments */
2461        for (i = 0; i < fragments; i++) {
2462                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2463                u32 frag_size = skb_frag_size(frag);
2464                offset = 0;
2465
2466                do {
2467                        bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2468                        if (!start_tx_ctx)
2469                                start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2470                        np->put_tx_ctx->dma = skb_frag_dma_map(
2471                                                        &np->pci_dev->dev,
2472                                                        frag, offset,
2473                                                        bcnt,
2474                                                        DMA_TO_DEVICE);
2475
2476                        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2477                                                       np->put_tx_ctx->dma))) {
2478
2479                                /* Unwind the mapped fragments */
2480                                do {
2481                                        nv_unmap_txskb(np, start_tx_ctx);
2482                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2483                                                tmp_tx_ctx = np->tx_skb;
2484                                } while (tmp_tx_ctx != np->put_tx_ctx);
2485                                dev_kfree_skb_any(skb);
2486                                np->put_tx_ctx = start_tx_ctx;
2487                                u64_stats_update_begin(&np->swstats_tx_syncp);
2488                                nv_txrx_stats_inc(stat_tx_dropped);
2489                                u64_stats_update_end(&np->swstats_tx_syncp);
2490
2491                                ret = NETDEV_TX_OK;
2492
2493                                goto dma_error;
2494                        }
2495                        np->put_tx_ctx->dma_len = bcnt;
2496                        np->put_tx_ctx->dma_single = 0;
2497                        put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2498                        put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2499                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2500
2501                        offset += bcnt;
2502                        frag_size -= bcnt;
2503                        if (unlikely(put_tx++ == np->last_tx.ex))
2504                                put_tx = np->tx_ring.ex;
2505                        if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2506                                np->put_tx_ctx = np->tx_skb;
2507                } while (frag_size);
2508        }
2509
2510        if (unlikely(put_tx == np->tx_ring.ex))
2511                prev_tx = np->last_tx.ex;
2512        else
2513                prev_tx = put_tx - 1;
2514
2515        if (unlikely(np->put_tx_ctx == np->tx_skb))
2516                prev_tx_ctx = np->last_tx_ctx;
2517        else
2518                prev_tx_ctx = np->put_tx_ctx - 1;
2519
2520        /* set last fragment flag */
2521        prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2522
2523        /* save skb in this slot's context area */
2524        prev_tx_ctx->skb = skb;
2525
2526        if (skb_is_gso(skb))
2527                tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2528        else
2529                tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2530                         NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2531
2532        /* vlan tag */
2533        if (skb_vlan_tag_present(skb))
2534                start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2535                                        skb_vlan_tag_get(skb));
2536        else
2537                start_tx->txvlan = 0;
2538
2539        spin_lock_irqsave(&np->lock, flags);
2540
2541        if (np->tx_limit) {
2542                /* Limit the number of outstanding tx. Set up all fragments,
2543                 * but do not set the VALID bit on the first descriptor. Save
2544                 * a pointer to that descriptor and to the next skb_map element.
2545                 */
2546
2547                if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2548                        if (!np->tx_change_owner)
2549                                np->tx_change_owner = start_tx_ctx;
2550
2551                        /* remove VALID bit */
2552                        tx_flags &= ~NV_TX2_VALID;
2553                        start_tx_ctx->first_tx_desc = start_tx;
2554                        start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2555                        np->tx_end_flip = np->put_tx_ctx;
2556                } else {
2557                        np->tx_pkts_in_progress++;
2558                }
2559        }
2560
2561        /* set tx flags */
2562        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2563
2564        netdev_sent_queue(np->dev, skb->len);
2565
2566        skb_tx_timestamp(skb);
2567
2568        np->put_tx.ex = put_tx;
2569
2570        spin_unlock_irqrestore(&np->lock, flags);
2571
2572txkick:
2573        if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
2574                u32 txrxctl_kick;
2575dma_error:
2576                txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
2577                writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2578        }
2579
2580        return ret;
2581}
2582
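/*
 * With np->tx_limit active, packets beyond NV_TX_LIMIT_COUNT are queued
 * with the VALID bit cleared on their first descriptor. As each packet
 * completes, hand the next deferred one to the hardware by restoring
 * its VALID bit and kicking the transmitter.
 */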
2583static inline void nv_tx_flip_ownership(struct net_device *dev)
2584{
2585        struct fe_priv *np = netdev_priv(dev);
2586
2587        np->tx_pkts_in_progress--;
2588        if (np->tx_change_owner) {
2589                np->tx_change_owner->first_tx_desc->flaglen |=
2590                        cpu_to_le32(NV_TX2_VALID);
2591                np->tx_pkts_in_progress++;
2592
2593                np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2594                if (np->tx_change_owner == np->tx_end_flip)
2595                        np->tx_change_owner = NULL;
2596
2597                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2598        }
2599}
2600
2601/*
2602 * nv_tx_done: check for completed packets, release the skbs.
2603 *
2604 * Caller must own np->lock.
2605 */
2606static int nv_tx_done(struct net_device *dev, int limit)
2607{
2608        struct fe_priv *np = netdev_priv(dev);
2609        u32 flags;
2610        int tx_work = 0;
2611        struct ring_desc *orig_get_tx = np->get_tx.orig;
2612        unsigned int bytes_compl = 0;
2613
2614        while ((np->get_tx.orig != np->put_tx.orig) &&
2615               !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2616               (tx_work < limit)) {
2617
2618                nv_unmap_txskb(np, np->get_tx_ctx);
2619
2620                if (np->desc_ver == DESC_VER_1) {
2621                        if (flags & NV_TX_LASTPACKET) {
2622                                if (unlikely(flags & NV_TX_ERROR)) {
2623                                        if ((flags & NV_TX_RETRYERROR)
2624                                            && !(flags & NV_TX_RETRYCOUNT_MASK))
2625                                                nv_legacybackoff_reseed(dev);
2626                                } else {
2627                                        unsigned int len;
2628
2629                                        u64_stats_update_begin(&np->swstats_tx_syncp);
2630                                        nv_txrx_stats_inc(stat_tx_packets);
2631                                        len = np->get_tx_ctx->skb->len;
2632                                        nv_txrx_stats_add(stat_tx_bytes, len);
2633                                        u64_stats_update_end(&np->swstats_tx_syncp);
2634                                }
2635                                bytes_compl += np->get_tx_ctx->skb->len;
2636                                dev_kfree_skb_any(np->get_tx_ctx->skb);
2637                                np->get_tx_ctx->skb = NULL;
2638                                tx_work++;
2639                        }
2640                } else {
2641                        if (flags & NV_TX2_LASTPACKET) {
2642                                if (unlikely(flags & NV_TX2_ERROR)) {
2643                                        if ((flags & NV_TX2_RETRYERROR)
2644                                            && !(flags & NV_TX2_RETRYCOUNT_MASK))
2645                                                nv_legacybackoff_reseed(dev);
2646                                } else {
2647                                        unsigned int len;
2648
2649                                        u64_stats_update_begin(&np->swstats_tx_syncp);
2650                                        nv_txrx_stats_inc(stat_tx_packets);
2651                                        len = np->get_tx_ctx->skb->len;
2652                                        nv_txrx_stats_add(stat_tx_bytes, len);
2653                                        u64_stats_update_end(&np->swstats_tx_syncp);
2654                                }
2655                                bytes_compl += np->get_tx_ctx->skb->len;
2656                                dev_kfree_skb_any(np->get_tx_ctx->skb);
2657                                np->get_tx_ctx->skb = NULL;
2658                                tx_work++;
2659                        }
2660                }
2661                if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2662                        np->get_tx.orig = np->tx_ring.orig;
2663                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2664                        np->get_tx_ctx = np->tx_skb;
2665        }
2666
2667        netdev_completed_queue(np->dev, tx_work, bytes_compl);
2668
2669        if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2670                np->tx_stop = 0;
2671                netif_wake_queue(dev);
2672        }
2673        return tx_work;
2674}
2675
2676static int nv_tx_done_optimized(struct net_device *dev, int limit)
2677{
2678        struct fe_priv *np = netdev_priv(dev);
2679        u32 flags;
2680        int tx_work = 0;
2681        struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2682        unsigned long bytes_cleaned = 0;
2683
2684        while ((np->get_tx.ex != np->put_tx.ex) &&
2685               !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2686               (tx_work < limit)) {
2687
2688                nv_unmap_txskb(np, np->get_tx_ctx);
2689
2690                if (flags & NV_TX2_LASTPACKET) {
2691                        if (unlikely(flags & NV_TX2_ERROR)) {
2692                                if ((flags & NV_TX2_RETRYERROR)
2693                                    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2694                                        if (np->driver_data & DEV_HAS_GEAR_MODE)
2695                                                nv_gear_backoff_reseed(dev);
2696                                        else
2697                                                nv_legacybackoff_reseed(dev);
2698                                }
2699                        } else {
2700                                unsigned int len;
2701
2702                                u64_stats_update_begin(&np->swstats_tx_syncp);
2703                                nv_txrx_stats_inc(stat_tx_packets);
2704                                len = np->get_tx_ctx->skb->len;
2705                                nv_txrx_stats_add(stat_tx_bytes, len);
2706                                u64_stats_update_end(&np->swstats_tx_syncp);
2707                        }
2708
2709                        bytes_cleaned += np->get_tx_ctx->skb->len;
2710                        dev_kfree_skb_any(np->get_tx_ctx->skb);
2711                        np->get_tx_ctx->skb = NULL;
2712                        tx_work++;
2713
2714                        if (np->tx_limit)
2715                                nv_tx_flip_ownership(dev);
2716                }
2717
2718                if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2719                        np->get_tx.ex = np->tx_ring.ex;
2720                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2721                        np->get_tx_ctx = np->tx_skb;
2722        }
2723
2724        netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2725
2726        if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2727                np->tx_stop = 0;
2728                netif_wake_queue(dev);
2729        }
2730        return tx_work;
2731}
2732
2733/*
2734 * nv_tx_timeout: dev->tx_timeout function
2735 * Called with netif_tx_lock held.
2736 */
2737static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
2738{
2739        struct fe_priv *np = netdev_priv(dev);
2740        u8 __iomem *base = get_hwbase(dev);
2741        u32 status;
2742        union ring_type put_tx;
2743        int saved_tx_limit;
2744
2745        if (np->msi_flags & NV_MSI_X_ENABLED)
2746                status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2747        else
2748                status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2749
2750        netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2751
2752        if (unlikely(debug_tx_timeout)) {
2753                int i;
2754
2755                netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2756                netdev_info(dev, "Dumping tx registers\n");
2757                for (i = 0; i + 32 <= np->register_size; i += 32) {
2758                        netdev_info(dev,
2759                                    "%3x: %08x %08x %08x %08x "
2760                                    "%08x %08x %08x %08x\n",
2761                                    i,
2762                                    readl(base + i + 0), readl(base + i + 4),
2763                                    readl(base + i + 8), readl(base + i + 12),
2764                                    readl(base + i + 16), readl(base + i + 20),
2765                                    readl(base + i + 24), readl(base + i + 28));
2766                }
2767                netdev_info(dev, "Dumping tx ring\n");
2768                for (i = 0; i < np->tx_ring_size; i += 4) {
2769                        if (!nv_optimized(np)) {
2770                                netdev_info(dev,
2771                                            "%03x: %08x %08x // %08x %08x "
2772                                            "// %08x %08x // %08x %08x\n",
2773                                            i,
2774                                            le32_to_cpu(np->tx_ring.orig[i].buf),
2775                                            le32_to_cpu(np->tx_ring.orig[i].flaglen),
2776                                            le32_to_cpu(np->tx_ring.orig[i+1].buf),
2777                                            le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2778                                            le32_to_cpu(np->tx_ring.orig[i+2].buf),
2779                                            le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2780                                            le32_to_cpu(np->tx_ring.orig[i+3].buf),
2781                                            le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2782                        } else {
2783                                netdev_info(dev,
2784                                            "%03x: %08x %08x %08x "
2785                                            "// %08x %08x %08x "
2786                                            "// %08x %08x %08x "
2787                                            "// %08x %08x %08x\n",
2788                                            i,
2789                                            le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2790                                            le32_to_cpu(np->tx_ring.ex[i].buflow),
2791                                            le32_to_cpu(np->tx_ring.ex[i].flaglen),
2792                                            le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2793                                            le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2794                                            le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2795                                            le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2796                                            le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2797                                            le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2798                                            le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2799                                            le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2800                                            le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2801                        }
2802                }
2803        }
2804
2805        spin_lock_irq(&np->lock);
2806
2807        /* 1) stop tx engine */
2808        nv_stop_tx(dev);
2809
2810        /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2811        saved_tx_limit = np->tx_limit;
2812        np->tx_limit = 0; /* prevent giving HW any limited pkts */
2813        np->tx_stop = 0;  /* prevent waking tx queue */
2814        if (!nv_optimized(np))
2815                nv_tx_done(dev, np->tx_ring_size);
2816        else
2817                nv_tx_done_optimized(dev, np->tx_ring_size);
2818
2819        /* save current HW position */
2820        if (np->tx_change_owner)
2821                put_tx.ex = np->tx_change_owner->first_tx_desc;
2822        else
2823                put_tx = np->put_tx;
2824
2825        /* 3) clear all tx state */
2826        nv_drain_tx(dev);
2827        nv_init_tx(dev);
2828
2829        /* 4) restore state to current HW position */
2830        np->get_tx = np->put_tx = put_tx;
2831        np->tx_limit = saved_tx_limit;
2832
2833        /* 5) restart tx engine */
2834        nv_start_tx(dev);
2835        netif_wake_queue(dev);
2836        spin_unlock_irq(&np->lock);
2837}
2838
2839/*
2840 * Called when the nic notices a mismatch between the actual data len on the
2841 * wire and the len indicated in the 802 header
2842 */
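/*
 * Worked example (illustrative values): an 802.3 frame whose length field
 * reads 100 gives protolen = 100 + ETH_HLEN = 114. If 120 bytes arrived on
 * the wire, the frame is trimmed to 114; if only 110 arrived, it is
 * discarded (-1). A proto value above ETH_DATA_LEN is an ethertype rather
 * than a length, so no check is possible and datalen is returned as-is.
 */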
2843static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2844{
2845        int hdrlen;     /* length of the 802 header */
2846        int protolen;   /* length as stored in the proto field */
2847
2848        /* 1) calculate len according to header */
2849        if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2850                protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2851                hdrlen = VLAN_HLEN;
2852        } else {
2853                protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2854                hdrlen = ETH_HLEN;
2855        }
2856        if (protolen > ETH_DATA_LEN)
2857                return datalen; /* Value in proto field not a len, no checks possible */
2858
2859        protolen += hdrlen;
2860        /* consistency checks: */
2861        if (datalen > ETH_ZLEN) {
2862                if (datalen >= protolen) {
2863                        /* more data on wire than in 802 header, trim off
2864                         * the additional data.
2865                         */
2866                        return protolen;
2867                } else {
2868                        /* less data on wire than mentioned in header.
2869                         * Discard the packet.
2870                         */
2871                        return -1;
2872                }
2873        } else {
2874                /* short packet. Accept only if 802 values are also short */
2875                if (protolen > ETH_ZLEN) {
2876                        return -1;
2877                }
2878                return datalen;
2879        }
2880}
2881
2882static void rx_missing_handler(u32 flags, struct fe_priv *np)
2883{
2884        if (flags & NV_RX_MISSEDFRAME) {
2885                u64_stats_update_begin(&np->swstats_rx_syncp);
2886                nv_txrx_stats_inc(stat_rx_missed_errors);
2887                u64_stats_update_end(&np->swstats_rx_syncp);
2888        }
2889}
2890
2891static int nv_rx_process(struct net_device *dev, int limit)
2892{
2893        struct fe_priv *np = netdev_priv(dev);
2894        u32 flags;
2895        int rx_work = 0;
2896        struct sk_buff *skb;
2897        int len;
2898
2899        while ((np->get_rx.orig != np->put_rx.orig) &&
2900              !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2901                (rx_work < limit)) {
2902
2903                /*
2904                 * the packet is for us - immediately tear down the pci mapping.
2905                 * TODO: check if a prefetch of the first cacheline improves
2906                 * the performance.
2907                 */
2908                dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2909                                 np->get_rx_ctx->dma_len,
2910                                 DMA_FROM_DEVICE);
2911                skb = np->get_rx_ctx->skb;
2912                np->get_rx_ctx->skb = NULL;
2913
2914                /* look at what we actually got: */
2915                if (np->desc_ver == DESC_VER_1) {
2916                        if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2917                                len = flags & LEN_MASK_V1;
2918                                if (unlikely(flags & NV_RX_ERROR)) {
2919                                        if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2920                                                len = nv_getlen(dev, skb->data, len);
2921                                                if (len < 0) {
2922                                                        dev_kfree_skb(skb);
2923                                                        goto next_pkt;
2924                                                }
2925                                        }
2926                                        /* framing errors are soft errors */
2927                                        else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2928                                                if (flags & NV_RX_SUBTRACT1)
2929                                                        len--;
2930                                        }
2931                                        /* the rest are hard errors */
2932                                        else {
2933                                                rx_missing_handler(flags, np);
2934                                                dev_kfree_skb(skb);
2935                                                goto next_pkt;
2936                                        }
2937                                }
2938                        } else {
2939                                dev_kfree_skb(skb);
2940                                goto next_pkt;
2941                        }
2942                } else {
2943                        if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2944                                len = flags & LEN_MASK_V2;
2945                                if (unlikely(flags & NV_RX2_ERROR)) {
2946                                        if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2947                                                len = nv_getlen(dev, skb->data, len);
2948                                                if (len < 0) {
2949                                                        dev_kfree_skb(skb);
2950                                                        goto next_pkt;
2951                                                }
2952                                        }
2953                                        /* framing errors are soft errors */
2954                                        else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2955                                                if (flags & NV_RX2_SUBTRACT1)
2956                                                        len--;
2957                                        }
2958                                        /* the rest are hard errors */
2959                                        else {
2960                                                dev_kfree_skb(skb);
2961                                                goto next_pkt;
2962                                        }
2963                                }
2964                                if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2965                                    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2966                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2967                        } else {
2968                                dev_kfree_skb(skb);
2969                                goto next_pkt;
2970                        }
2971                }
2972                /* got a valid packet - forward it to the network core */
2973                skb_put(skb, len);
2974                skb->protocol = eth_type_trans(skb, dev);
2975                napi_gro_receive(&np->napi, skb);
2976                u64_stats_update_begin(&np->swstats_rx_syncp);
2977                nv_txrx_stats_inc(stat_rx_packets);
2978                nv_txrx_stats_add(stat_rx_bytes, len);
2979                u64_stats_update_end(&np->swstats_rx_syncp);
2980next_pkt:
2981                if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2982                        np->get_rx.orig = np->rx_ring.orig;
2983                if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2984                        np->get_rx_ctx = np->rx_skb;
2985
2986                rx_work++;
2987        }
2988
2989        return rx_work;
2990}
2991
2992static int nv_rx_process_optimized(struct net_device *dev, int limit)
2993{
2994        struct fe_priv *np = netdev_priv(dev);
2995        u32 flags;
2996        u32 vlanflags = 0;
2997        int rx_work = 0;
2998        struct sk_buff *skb;
2999        int len;
3000
3001        while ((np->get_rx.ex != np->put_rx.ex) &&
3002              !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
3003              (rx_work < limit)) {
3004
3005                /*
3006                 * the packet is for us - immediately tear down the pci mapping.
3007                 * TODO: check if a prefetch of the first cacheline improves
3008                 * the performance.
3009                 */
3010                dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
3011                                 np->get_rx_ctx->dma_len,
3012                                 DMA_FROM_DEVICE);
3013                skb = np->get_rx_ctx->skb;
3014                np->get_rx_ctx->skb = NULL;
3015
3016                /* look at what we actually got: */
3017                if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
3018                        len = flags & LEN_MASK_V2;
3019                        if (unlikely(flags & NV_RX2_ERROR)) {
3020                                if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
3021                                        len = nv_getlen(dev, skb->data, len);
3022                                        if (len < 0) {
3023                                                dev_kfree_skb(skb);
3024                                                goto next_pkt;
3025                                        }
3026                                }
3027                                /* framing errors are soft errors */
3028                                else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
3029                                        if (flags & NV_RX2_SUBTRACT1)
3030                                                len--;
3031                                }
3032                                /* the rest are hard errors */
3033                                else {
3034                                        dev_kfree_skb(skb);
3035                                        goto next_pkt;
3036                                }
3037                        }
3038
3039                        if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
3040                            ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
3041                                skb->ip_summed = CHECKSUM_UNNECESSARY;
3042
3043                        /* got a valid packet - forward it to the network core */
3044                        skb_put(skb, len);
3045                        skb->protocol = eth_type_trans(skb, dev);
3046                        prefetch(skb->data);
3047
3048                        vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
3049
3050                        /*
3051                         * We need to check for NETIF_F_HW_VLAN_CTAG_RX
3052                         * here: even if vlan rx accel is disabled,
3053                         * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
3054                         */
3055                        if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
3056                            vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
3057                                u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
3058
3059                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3060                        }
3061                        napi_gro_receive(&np->napi, skb);
3062                        u64_stats_update_begin(&np->swstats_rx_syncp);
3063                        nv_txrx_stats_inc(stat_rx_packets);
3064                        nv_txrx_stats_add(stat_rx_bytes, len);
3065                        u64_stats_update_end(&np->swstats_rx_syncp);
3066                } else {
3067                        dev_kfree_skb(skb);
3068                }
3069next_pkt:
3070                if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
3071                        np->get_rx.ex = np->rx_ring.ex;
3072                if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
3073                        np->get_rx_ctx = np->rx_skb;
3074
3075                rx_work++;
3076        }
3077
3078        return rx_work;
3079}
3080
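/*
 * set_bufsize: derive the rx buffer size from the MTU. For example, the
 * default MTU of 1500 gives ETH_DATA_LEN + NV_RX_HEADERS bytes, while a
 * jumbo MTU of 9000 gives 9000 + NV_RX_HEADERS.
 */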
3081static void set_bufsize(struct net_device *dev)
3082{
3083        struct fe_priv *np = netdev_priv(dev);
3084
3085        if (dev->mtu <= ETH_DATA_LEN)
3086                np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
3087        else
3088                np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3089}
3090
3091/*
3092 * nv_change_mtu: dev->change_mtu function
3093 * Called with dev_base_lock held for read.
3094 */
3095static int nv_change_mtu(struct net_device *dev, int new_mtu)
3096{
3097        struct fe_priv *np = netdev_priv(dev);
3098        int old_mtu;
3099
3100        old_mtu = dev->mtu;
3101        dev->mtu = new_mtu;
3102
3103        /* return early if the buffer sizes will not change */
3104        if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3105                return 0;
3106
3107        /* synchronized against open : rtnl_lock() held by caller */
3108        if (netif_running(dev)) {
3109                u8 __iomem *base = get_hwbase(dev);
3110                /*
3111                 * It seems that the nic preloads valid ring entries into an
3112                 * internal buffer. The flush procedure below is guessed;
3113                 * there is probably a simpler approach. Changing the MTU is
3114                 * a rare event, so it shouldn't matter.
3115                 */
3116                nv_disable_irq(dev);
3117                nv_napi_disable(dev);
3118                netif_tx_lock_bh(dev);
3119                netif_addr_lock(dev);
3120                spin_lock(&np->lock);
3121                /* stop engines */
3122                nv_stop_rxtx(dev);
3123                nv_txrx_reset(dev);
3124                /* drain rx and tx queues */
3125                nv_drain_rxtx(dev);
3126                /* reinit driver view of the rx queue */
3127                set_bufsize(dev);
3128                if (nv_init_ring(dev)) {
3129                        if (!np->in_shutdown)
3130                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3131                }
3132                /* reinit nic view of the rx queue */
3133                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3134                setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3135                writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3136                        base + NvRegRingSizes);
3137                pci_push(base);
3138                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3139                pci_push(base);
3140
3141                /* restart rx and tx engines */
3142                nv_start_rxtx(dev);
3143                spin_unlock(&np->lock);
3144                netif_addr_unlock(dev);
3145                netif_tx_unlock_bh(dev);
3146                nv_napi_enable(dev);
3147                nv_enable_irq(dev);
3148        }
3149        return 0;
3150}
3151
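/*
 * nv_copy_mac_to_hw: pack dev_addr low-byte-first into the two MAC
 * address registers. For example, 00:11:22:33:44:55 yields
 * NvRegMacAddrA = 0x33221100 and NvRegMacAddrB = 0x00005544.
 */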
3152static void nv_copy_mac_to_hw(struct net_device *dev)
3153{
3154        u8 __iomem *base = get_hwbase(dev);
3155        u32 mac[2];
3156
3157        mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3158                        (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3159        mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3160
3161        writel(mac[0], base + NvRegMacAddrA);
3162        writel(mac[1], base + NvRegMacAddrB);
3163}
3164
3165/*
3166 * nv_set_mac_address: dev->set_mac_address function
3167 * Called with rtnl_lock() held.
3168 */
3169static int nv_set_mac_address(struct net_device *dev, void *addr)
3170{
3171        struct fe_priv *np = netdev_priv(dev);
3172        struct sockaddr *macaddr = (struct sockaddr *)addr;
3173
3174        if (!is_valid_ether_addr(macaddr->sa_data))
3175                return -EADDRNOTAVAIL;
3176
3177        /* synchronized against open : rtnl_lock() held by caller */
3178        memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3179
3180        if (netif_running(dev)) {
3181                netif_tx_lock_bh(dev);
3182                netif_addr_lock(dev);
3183                spin_lock_irq(&np->lock);
3184
3185                /* stop rx engine */
3186                nv_stop_rx(dev);
3187
3188                /* set mac address */
3189                nv_copy_mac_to_hw(dev);
3190
3191                /* restart rx engine */
3192                nv_start_rx(dev);
3193                spin_unlock_irq(&np->lock);
3194                netif_addr_unlock(dev);
3195                netif_tx_unlock_bh(dev);
3196        } else {
3197                nv_copy_mac_to_hw(dev);
3198        }
3199        return 0;
3200}
3201
3202/*
3203 * nv_set_multicast: dev->set_multicast function
3204 * Called with netif_tx_lock held.
3205 */
3206static void nv_set_multicast(struct net_device *dev)
3207{
3208        struct fe_priv *np = netdev_priv(dev);
3209        u8 __iomem *base = get_hwbase(dev);
3210        u32 addr[2];
3211        u32 mask[2];
3212        u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3213
3214        memset(addr, 0, sizeof(addr));
3215        memset(mask, 0, sizeof(mask));
3216
3217        if (dev->flags & IFF_PROMISC) {
3218                pff |= NVREG_PFF_PROMISC;
3219        } else {
3220                pff |= NVREG_PFF_MYADDR;
3221
3222                if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3223                        u32 alwaysOff[2];
3224                        u32 alwaysOn[2];
3225
3226                        alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3227                        if (dev->flags & IFF_ALLMULTI) {
3228                                alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3229                        } else {
3230                                struct netdev_hw_addr *ha;
3231
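                                /* Accumulate the bits common to all
                                 * multicast addresses: alwaysOn keeps the
                                 * bits set in every address, alwaysOff the
                                 * bits clear in every address. E.g. for the
                                 * bytes 0x01 and 0x03: alwaysOn = 0x01,
                                 * alwaysOff = 0xfc, so mask = 0xfd and only
                                 * the differing bit 1 is left unmatched.
                                 */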
3232                                netdev_for_each_mc_addr(ha, dev) {
3233                                        unsigned char *hw_addr = ha->addr;
3234                                        u32 a, b;
3235
3236                                        a = le32_to_cpu(*(__le32 *) hw_addr);
3237                                        b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3238                                        alwaysOn[0] &= a;
3239                                        alwaysOff[0] &= ~a;
3240                                        alwaysOn[1] &= b;
3241                                        alwaysOff[1] &= ~b;
3242                                }
3243                        }
3244                        addr[0] = alwaysOn[0];
3245                        addr[1] = alwaysOn[1];
3246                        mask[0] = alwaysOn[0] | alwaysOff[0];
3247                        mask[1] = alwaysOn[1] | alwaysOff[1];
3248                } else {
3249                        mask[0] = NVREG_MCASTMASKA_NONE;
3250                        mask[1] = NVREG_MCASTMASKB_NONE;
3251                }
3252        }
3253        addr[0] |= NVREG_MCASTADDRA_FORCE;
3254        pff |= NVREG_PFF_ALWAYS;
3255        spin_lock_irq(&np->lock);
3256        nv_stop_rx(dev);
3257        writel(addr[0], base + NvRegMulticastAddrA);
3258        writel(addr[1], base + NvRegMulticastAddrB);
3259        writel(mask[0], base + NvRegMulticastMaskA);
3260        writel(mask[1], base + NvRegMulticastMaskB);
3261        writel(pff, base + NvRegPacketFilterFlags);
3262        nv_start_rx(dev);
3263        spin_unlock_irq(&np->lock);
3264}
3265
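/*
 * nv_update_pause: program rx pause acceptance and tx pause frame
 * generation according to pause_flags, limited to what the device
 * advertises via NV_PAUSEFRAME_RX_CAPABLE/NV_PAUSEFRAME_TX_CAPABLE.
 */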
3266static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3267{
3268        struct fe_priv *np = netdev_priv(dev);
3269        u8 __iomem *base = get_hwbase(dev);
3270
3271        np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3272
3273        if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3274                u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3275                if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3276                        writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3277                        np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3278                } else {
3279                        writel(pff, base + NvRegPacketFilterFlags);
3280                }
3281        }
3282        if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3283                u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3284                if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3285                        u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3286                        if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3287                                pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3288                        if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3289                                pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3290                                /* limit the number of tx pause frames to a default of 8 */
3291                                writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3292                        }
3293                        writel(pause_enable,  base + NvRegTxPauseFrame);
3294                        writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3295                        np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3296                } else {
3297                        writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3298                        writel(regmisc, base + NvRegMisc1);
3299                }
3300        }
3301}
3302
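/*
 * nv_force_linkspeed: program the MAC for a fixed speed/duplex without
 * consulting the autonegotiation results (used by the loopback path in
 * nv_update_linkspeed).
 */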
3303static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3304{
3305        struct fe_priv *np = netdev_priv(dev);
3306        u8 __iomem *base = get_hwbase(dev);
3307        u32 phyreg, txreg;
3308        int mii_status;
3309
3310        np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3311        np->duplex = duplex;
3312
3313        /* see if gigabit phy */
3314        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3315        if (mii_status & PHY_GIGABIT) {
3316                np->gigabit = PHY_GIGABIT;
3317                phyreg = readl(base + NvRegSlotTime);
3318                phyreg &= ~(0x3FF00);
3319                if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3320                    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3321                        phyreg |= NVREG_SLOTTIME_10_100_FULL;
3323                else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3324                        phyreg |= NVREG_SLOTTIME_1000_FULL;
3325                writel(phyreg, base + NvRegSlotTime);
3326        }
3327
3328        phyreg = readl(base + NvRegPhyInterface);
3329        phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3330        if (np->duplex == 0)
3331                phyreg |= PHY_HALF;
3332        if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3333                phyreg |= PHY_100;
3334        else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3335                                                        NVREG_LINKSPEED_1000)
3336                phyreg |= PHY_1000;
3337        writel(phyreg, base + NvRegPhyInterface);
3338
3339        if (phyreg & PHY_RGMII) {
3340                if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3341                                                        NVREG_LINKSPEED_1000)
3342                        txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3343                else
3344                        txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3345        } else {
3346                txreg = NVREG_TX_DEFERRAL_DEFAULT;
3347        }
3348        writel(txreg, base + NvRegTxDeferral);
3349
3350        if (np->desc_ver == DESC_VER_1) {
3351                txreg = NVREG_TX_WM_DESC1_DEFAULT;
3352        } else {
3353                if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3354                                         NVREG_LINKSPEED_1000)
3355                        txreg = NVREG_TX_WM_DESC2_3_1000;
3356                else
3357                        txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3358        }
3359        writel(txreg, base + NvRegTxWatermark);
3360
3361        writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3362                        base + NvRegMisc1);
3363        pci_push(base);
3364        writel(np->linkspeed, base + NvRegLinkSpeed);
3365        pci_push(base);
3366}
3367
3368/**
3369 * nv_update_linkspeed - Setup the MAC according to the link partner
3370 * @dev: Network device to be configured
3371 *
3372 * The function queries the PHY and checks if there is a link partner.
3373 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3374 * set to 10 MBit HD.
3375 *
3376 * The function returns 0 if there is no link partner and 1 if there is
3377 * a good link partner.
3378 */
3379static int nv_update_linkspeed(struct net_device *dev)
3380{
3381        struct fe_priv *np = netdev_priv(dev);
3382        u8 __iomem *base = get_hwbase(dev);
3383        int adv = 0;
3384        int lpa = 0;
3385        int adv_lpa, adv_pause, lpa_pause;
3386        int newls = np->linkspeed;
3387        int newdup = np->duplex;
3388        int mii_status;
3389        u32 bmcr;
3390        int retval = 0;
3391        u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3392        u32 txrxFlags = 0;
3393        u32 phy_exp;
3394
3395        /* If device loopback is enabled, set carrier on and enable max link
3396         * speed.
3397         */
3398        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3399        if (bmcr & BMCR_LOOPBACK) {
3400                if (netif_running(dev)) {
3401                        nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3402                        if (!netif_carrier_ok(dev))
3403                                netif_carrier_on(dev);
3404                }
3405                return 1;
3406        }
3407
3408        /* BMSR_LSTATUS is latched, read it twice:
3409         * we want the current value.
3410         */
3411        mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3412        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3413
3414        if (!(mii_status & BMSR_LSTATUS)) {
3415                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3416                newdup = 0;
3417                retval = 0;
3418                goto set_speed;
3419        }
3420
3421        if (np->autoneg == 0) {
3422                if (np->fixed_mode & LPA_100FULL) {
3423                        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3424                        newdup = 1;
3425                } else if (np->fixed_mode & LPA_100HALF) {
3426                        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3427                        newdup = 0;
3428                } else if (np->fixed_mode & LPA_10FULL) {
3429                        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3430                        newdup = 1;
3431                } else {
3432                        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3433                        newdup = 0;
3434                }
3435                retval = 1;
3436                goto set_speed;
3437        }
3438        /* check that autonegotiation is complete */
3439        if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3440                /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3441                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3442                newdup = 0;
3443                retval = 0;
3444                goto set_speed;
3445        }
3446
3447        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3448        lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3449
3450        retval = 1;
3451        if (np->gigabit == PHY_GIGABIT) {
3452                control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3453                status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3454
3455                if ((control_1000 & ADVERTISE_1000FULL) &&
3456                        (status_1000 & LPA_1000FULL)) {
3457                        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3458                        newdup = 1;
3459                        goto set_speed;
3460                }
3461        }
3462
3463        /* FIXME: handle parallel detection properly */
3464        adv_lpa = lpa & adv;
3465        if (adv_lpa & LPA_100FULL) {
3466                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3467                newdup = 1;
3468        } else if (adv_lpa & LPA_100HALF) {
3469                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3470                newdup = 0;
3471        } else if (adv_lpa & LPA_10FULL) {
3472                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3473                newdup = 1;
3474        } else if (adv_lpa & LPA_10HALF) {
3475                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3476                newdup = 0;
3477        } else {
3478                newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3479                newdup = 0;
3480        }
3481
3482set_speed:
3483        if (np->duplex == newdup && np->linkspeed == newls)
3484                return retval;
3485
3486        np->duplex = newdup;
3487        np->linkspeed = newls;
3488
3489        /* The transmitter and receiver must be restarted for safe update */
3490        if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3491                txrxFlags |= NV_RESTART_TX;
3492                nv_stop_tx(dev);
3493        }
3494        if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3495                txrxFlags |= NV_RESTART_RX;
3496                nv_stop_rx(dev);
3497        }
3498
3499        if (np->gigabit == PHY_GIGABIT) {
3500                phyreg = readl(base + NvRegSlotTime);
3501                phyreg &= ~(0x3FF00);
3502                if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3503                    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3504                        phyreg |= NVREG_SLOTTIME_10_100_FULL;
3505                else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3506                        phyreg |= NVREG_SLOTTIME_1000_FULL;
3507                writel(phyreg, base + NvRegSlotTime);
3508        }
3509
3510        phyreg = readl(base + NvRegPhyInterface);
3511        phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3512        if (np->duplex == 0)
3513                phyreg |= PHY_HALF;
3514        if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3515                phyreg |= PHY_100;
3516        else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3517                phyreg |= PHY_1000;
3518        writel(phyreg, base + NvRegPhyInterface);
3519
3520        phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3521        if (phyreg & PHY_RGMII) {
3522                if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3523                        txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3524                } else {
3525                        if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3526                                if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3527                                        txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3528                                else
3529                                        txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3530                        } else {
3531                                txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3532                        }
3533                }
3534        } else {
3535                if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3536                        txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3537                else
3538                        txreg = NVREG_TX_DEFERRAL_DEFAULT;
3539        }
3540        writel(txreg, base + NvRegTxDeferral);
3541
3542        if (np->desc_ver == DESC_VER_1) {
3543                txreg = NVREG_TX_WM_DESC1_DEFAULT;
3544        } else {
3545                if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3546                        txreg = NVREG_TX_WM_DESC2_3_1000;
3547                else
3548                        txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3549        }
3550        writel(txreg, base + NvRegTxWatermark);
3551
3552        writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3553                base + NvRegMisc1);
3554        pci_push(base);
3555        writel(np->linkspeed, base + NvRegLinkSpeed);
3556        pci_push(base);
3557
3558        pause_flags = 0;
3559        /* setup pause frame */
3560        if (netif_running(dev) && (np->duplex != 0)) {
3561                if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3562                        adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3563                        lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3564
3565                        switch (adv_pause) {
3566                        case ADVERTISE_PAUSE_CAP:
3567                                if (lpa_pause & LPA_PAUSE_CAP) {
3568                                        pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3569                                        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3570                                                pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3571                                }
3572                                break;
3573                        case ADVERTISE_PAUSE_ASYM:
3574                                if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3575                                        pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3576                                break;
3577                        case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3578                                if (lpa_pause & LPA_PAUSE_CAP) {
3579                                        pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3580                                        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3581                                                pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3582                                }
3583                                if (lpa_pause == LPA_PAUSE_ASYM)
3584                                        pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3585                                break;
3586                        }
3587                } else {
3588                        pause_flags = np->pause_flags;
3589                }
3590        }
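        /*
         * The switch above implements 802.3x pause resolution from the
         * local (adv) and partner (lpa) ability bits: both ends
         * advertising PAUSE_CAP enables rx pause (tx pause additionally
         * requires NV_PAUSEFRAME_TX_REQ); asym-only against a partner
         * with both bits set enables tx pause only; both bits against an
         * asym-only partner enables rx pause only.
         */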
3591        nv_update_pause(dev, pause_flags);
3592
3593        if (txrxFlags & NV_RESTART_TX)
3594                nv_start_tx(dev);
3595        if (txrxFlags & NV_RESTART_RX)
3596                nv_start_rx(dev);
3597
3598        return retval;
3599}
3600
3601static void nv_linkchange(struct net_device *dev)
3602{
3603        if (nv_update_linkspeed(dev)) {
3604                if (!netif_carrier_ok(dev)) {
3605                        netif_carrier_on(dev);
3606                        netdev_info(dev, "link up\n");
3607                        nv_txrx_gate(dev, false);
3608                        nv_start_rx(dev);
3609                }
3610        } else {
3611                if (netif_carrier_ok(dev)) {
3612                        netif_carrier_off(dev);
3613                        netdev_info(dev, "link down\n");
3614                        nv_txrx_gate(dev, true);
3615                        nv_stop_rx(dev);
3616                }
3617        }
3618}
3619
3620static void nv_link_irq(struct net_device *dev)
3621{
3622        u8 __iomem *base = get_hwbase(dev);
3623        u32 miistat;
3624
3625        miistat = readl(base + NvRegMIIStatus);
3626        writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3627
3628        if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3629                nv_linkchange(dev);
3630}
3631
3632static void nv_msi_workaround(struct fe_priv *np)
3633{
3635        /* Need to toggle the MSI irq mask within the ethernet device;
3636         * otherwise, future interrupts will not be detected.
3637         */
3638        if (np->msi_flags & NV_MSI_ENABLED) {
3639                u8 __iomem *base = np->base;
3640
3641                writel(0, base + NvRegMSIIrqMask);
3642                writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3643        }
3644}
3645
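/*
 * nv_change_interrupt_mode: in dynamic optimization mode, switch between
 * the poll-based (CPU) and per-packet (throughput) irq masks. A burst of
 * more than NV_DYNAMIC_THRESHOLD work items flips to the CPU mask
 * immediately; only after NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet
 * passes does it flip back. Returns 1 if the irq mask was changed.
 */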
3646static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3647{
3648        struct fe_priv *np = netdev_priv(dev);
3649
3650        if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3651                if (total_work > NV_DYNAMIC_THRESHOLD) {
3652                        /* transition to poll based interrupts */
3653                        np->quiet_count = 0;
3654                        if (np->irqmask != NVREG_IRQMASK_CPU) {
3655                                np->irqmask = NVREG_IRQMASK_CPU;
3656                                return 1;
3657                        }
3658                } else {
3659                        if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3660                                np->quiet_count++;
3661                        } else {
3662                                /* reached a period of low activity, switch
3663                                 * to per tx/rx packet interrupts */
3664                                if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3665                                        np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3666                                        return 1;
3667                                }
3668                        }
3669                }
3670        }
3671        return 0;
3672}
3673
3674static irqreturn_t nv_nic_irq(int foo, void *data)
3675{
3676        struct net_device *dev = (struct net_device *) data;
3677        struct fe_priv *np = netdev_priv(dev);
3678        u8 __iomem *base = get_hwbase(dev);
3679
3680        if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3681                np->events = readl(base + NvRegIrqStatus);
3682                writel(np->events, base + NvRegIrqStatus);
3683        } else {
3684                np->events = readl(base + NvRegMSIXIrqStatus);
3685                writel(np->events, base + NvRegMSIXIrqStatus);
3686        }
3687        if (!(np->events & np->irqmask))
3688                return IRQ_NONE;
3689
3690        nv_msi_workaround(np);
3691
3692        if (napi_schedule_prep(&np->napi)) {
3693                /*
3694                 * Disable further irqs (MSI-X is not enabled with NAPI)
3695                 */
3696                writel(0, base + NvRegIrqMask);
3697                __napi_schedule(&np->napi);
3698        }
3699
3700        return IRQ_HANDLED;
3701}
3702
3703/* All _optimized functions are used to help increase performance
3704 * (reduce CPU usage and increase throughput). They use descriptor
3705 * version 3, compiler directives, and fewer memory accesses.
3706 */
3707static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3708{
3709        struct net_device *dev = (struct net_device *) data;
3710        struct fe_priv *np = netdev_priv(dev);
3711        u8 __iomem *base = get_hwbase(dev);
3712
3713        if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3714                np->events = readl(base + NvRegIrqStatus);
3715                writel(np->events, base + NvRegIrqStatus);
3716        } else {
3717                np->events = readl(base + NvRegMSIXIrqStatus);
3718                writel(np->events, base + NvRegMSIXIrqStatus);
3719        }
3720        if (!(np->events & np->irqmask))
3721                return IRQ_NONE;
3722
3723        nv_msi_workaround(np);
3724
3725        if (napi_schedule_prep(&np->napi)) {
3726                /*
3727                 * Disable further irqs (MSI-X is not enabled with NAPI)
3728                 */
3729                writel(0, base + NvRegIrqMask);
3730                __napi_schedule(&np->napi);
3731        }
3732
3733        return IRQ_HANDLED;
3734}
3735
3736static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3737{
3738        struct net_device *dev = (struct net_device *) data;
3739        struct fe_priv *np = netdev_priv(dev);
3740        u8 __iomem *base = get_hwbase(dev);
3741        u32 events;
3742        int i;
3743        unsigned long flags;
3744
3745        for (i = 0;; i++) {
3746                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3747                writel(events, base + NvRegMSIXIrqStatus);
3748                netdev_dbg(dev, "tx irq events: %08x\n", events);
3749                if (!(events & np->irqmask))
3750                        break;
3751
3752                spin_lock_irqsave(&np->lock, flags);
3753                nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3754                spin_unlock_irqrestore(&np->lock, flags);
3755
3756                if (unlikely(i > max_interrupt_work)) {
3757                        spin_lock_irqsave(&np->lock, flags);
3758                        /* disable interrupts on the nic */
3759                        writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3760                        pci_push(base);
3761
3762                        if (!np->in_shutdown) {
3763                                np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3764                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3765                        }
3766                        spin_unlock_irqrestore(&np->lock, flags);
3767                        netdev_dbg(dev, "%s: too many iterations (%d)\n",
3768                                   __func__, i);
3769                        break;
3770                }
3772        }
3773
3774        return IRQ_RETVAL(i);
3775}
3776
3777static int nv_napi_poll(struct napi_struct *napi, int budget)
3778{
3779        struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3780        struct net_device *dev = np->dev;
3781        u8 __iomem *base = get_hwbase(dev);
3782        unsigned long flags;
3783        int retcode;
3784        int rx_count, tx_work = 0, rx_work = 0;
3785
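        /* Reap tx completions and process rx packets in slices until the
         * napi budget is consumed, an rx refill fails (retcode != 0), or
         * no rx work remains.
         */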
3786        do {
3787                if (!nv_optimized(np)) {
3788                        spin_lock_irqsave(&np->lock, flags);
3789                        tx_work += nv_tx_done(dev, np->tx_ring_size);
3790                        spin_unlock_irqrestore(&np->lock, flags);
3791
3792                        rx_count = nv_rx_process(dev, budget - rx_work);
3793                        retcode = nv_alloc_rx(dev);
3794                } else {
3795                        spin_lock_irqsave(&np->lock, flags);
3796                        tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3797                        spin_unlock_irqrestore(&np->lock, flags);
3798
3799                        rx_count = nv_rx_process_optimized(dev,
3800                            budget - rx_work);
3801                        retcode = nv_alloc_rx_optimized(dev);
3802                }
3803        } while (retcode == 0 &&
3804                 rx_count > 0 && (rx_work += rx_count) < budget);
3805
3806        if (retcode) {
3807                spin_lock_irqsave(&np->lock, flags);
3808                if (!np->in_shutdown)
3809                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3810                spin_unlock_irqrestore(&np->lock, flags);
3811        }
3812
3813        nv_change_interrupt_mode(dev, tx_work + rx_work);
3814
3815        if (unlikely(np->events & NVREG_IRQ_LINK)) {
3816                spin_lock_irqsave(&np->lock, flags);
3817                nv_link_irq(dev);
3818                spin_unlock_irqrestore(&np->lock, flags);
3819        }
3820        if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3821                spin_lock_irqsave(&np->lock, flags);
3822                nv_linkchange(dev);
3823                spin_unlock_irqrestore(&np->lock, flags);
3824                np->link_timeout = jiffies + LINK_TIMEOUT;
3825        }
3826        if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3827                spin_lock_irqsave(&np->lock, flags);
3828                if (!np->in_shutdown) {
3829                        np->nic_poll_irq = np->irqmask;
3830                        np->recover_error = 1;
3831                        mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3832                }
3833                spin_unlock_irqrestore(&np->lock, flags);
3834                napi_complete(napi);
3835                return rx_work;
3836        }
3837
3838        if (rx_work < budget) {
3839                /* re-enable interrupts
3840                 * (MSI-X is not enabled with NAPI) */
3841                napi_complete_done(napi, rx_work);
3842
3843                writel(np->irqmask, base + NvRegIrqMask);
3844        }
3845        return rx_work;
3846}
3847
3848static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3849{
3850        struct net_device *dev = (struct net_device *) data;
3851        struct fe_priv *np = netdev_priv(dev);
3852        u8 __iomem *base = get_hwbase(dev);
3853        u32 events;
3854        int i;
3855        unsigned long flags;
3856
3857        for (i = 0;; i++) {
3858                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3859                writel(events, base + NvRegMSIXIrqStatus);
3860                netdev_dbg(dev, "rx irq events: %08x\n", events);
3861                if (!(events & np->irqmask))
3862                        break;
3863
3864                if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3865                        if (unlikely(nv_alloc_rx_optimized(dev))) {
3866                                spin_lock_irqsave(&np->lock, flags);
3867                                if (!np->in_shutdown)
3868                                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3869                                spin_unlock_irqrestore(&np->lock, flags);
3870                        }
3871                }
3872
3873                if (unlikely(i > max_interrupt_work)) {
3874                        spin_lock_irqsave(&np->lock, flags);
3875                        /* disable interrupts on the nic */
3876                        writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3877                        pci_push(base);
3878
3879                        if (!np->in_shutdown) {
3880                                np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3881                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3882                        }
3883                        spin_unlock_irqrestore(&np->lock, flags);
3884                        netdev_dbg(dev, "%s: too many iterations (%d)\n",
3885                                   __func__, i);
3886                        break;
3887                }
3888        }
3889
3890        return IRQ_RETVAL(i);
3891}
3892
3893static irqreturn_t nv_nic_irq_other(int foo, void *data)
3894{
3895        struct net_device *dev = (struct net_device *) data;
3896        struct fe_priv *np = netdev_priv(dev);
3897        u8 __iomem *base = get_hwbase(dev);
3898        u32 events;
3899        int i;
3900        unsigned long flags;
3901
3902        for (i = 0;; i++) {
3903                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3904                writel(events, base + NvRegMSIXIrqStatus);
3905                netdev_dbg(dev, "irq events: %08x\n", events);
3906                if (!(events & np->irqmask))
3907                        break;
3908
3909                /* check tx in case we reached max loop limit in tx isr */
3910                spin_lock_irqsave(&np->lock, flags);
3911                nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3912                spin_unlock_irqrestore(&np->lock, flags);
3913
3914                if (events & NVREG_IRQ_LINK) {
3915                        spin_lock_irqsave(&np->lock, flags);
3916                        nv_link_irq(dev);
3917                        spin_unlock_irqrestore(&np->lock, flags);
3918                }
3919                if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3920                        spin_lock_irqsave(&np->lock, flags);
3921                        nv_linkchange(dev);
3922                        spin_unlock_irqrestore(&np->lock, flags);
3923                        np->link_timeout = jiffies + LINK_TIMEOUT;
3924                }
3925                if (events & NVREG_IRQ_RECOVER_ERROR) {
3926                        spin_lock_irqsave(&np->lock, flags);
3927                        /* disable interrupts on the nic */
3928                        writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3929                        pci_push(base);
3930
3931                        if (!np->in_shutdown) {
3932                                np->nic_poll_irq |= NVREG_IRQ_OTHER;
3933                                np->recover_error = 1;
3934                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3935                        }
3936                        spin_unlock_irqrestore(&np->lock, flags);
3937                        break;
3938                }
3939                if (unlikely(i > max_interrupt_work)) {
3940                        spin_lock_irqsave(&np->lock, flags);
3941                        /* disable interrupts on the nic */
3942                        writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3943                        pci_push(base);
3944
3945                        if (!np->in_shutdown) {
3946                                np->nic_poll_irq |= NVREG_IRQ_OTHER;
3947                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3948                        }
3949                        spin_unlock_irqrestore(&np->lock, flags);
3950                        netdev_dbg(dev, "%s: too many iterations (%d)\n",
3951                                   __func__, i);
3952                        break;
3953                }
3955        }
3956
3957        return IRQ_RETVAL(i);
3958}
3959
3960static irqreturn_t nv_nic_irq_test(int foo, void *data)
3961{
3962        struct net_device *dev = (struct net_device *) data;
3963        struct fe_priv *np = netdev_priv(dev);
3964        u8 __iomem *base = get_hwbase(dev);
3965        u32 events;
3966
3967        if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3968                events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3969                writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3970        } else {
3971                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3972                writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3973        }
3974        pci_push(base);
3975        if (!(events & NVREG_IRQ_TIMER))
3976                return IRQ_RETVAL(0);
3977
3978        nv_msi_workaround(np);
3979
3980        spin_lock(&np->lock);
3981        np->intr_test = 1;
3982        spin_unlock(&np->lock);
3983
3984        return IRQ_RETVAL(1);
3985}
3986
3987static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3988{
3989        u8 __iomem *base = get_hwbase(dev);
3990        int i;
3991        u32 msixmap = 0;
3992
3993        /* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3994         * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3995         * the remaining 8 interrupts.
3996         */
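        /* e.g. with vector 1 and irqmask bit 3 set, the first loop below
         * ORs in 1 << (3 << 2), i.e. vector 1 lands in map bits 12-15.
         */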
3997        for (i = 0; i < 8; i++) {
3998                if ((irqmask >> i) & 0x1)
3999                        msixmap |= vector << (i << 2);
4000        }
4001        writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
4002
4003        msixmap = 0;
4004        for (i = 0; i < 8; i++) {
4005                if ((irqmask >> (i + 8)) & 0x1)
4006                        msixmap |= vector << (i << 2);
4007        }
4008        writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
4009}
4010
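/* Set up interrupt delivery, trying the richest mode first: MSI-X (with
 * separate rx/tx/other vectors in throughput mode), then MSI, then the
 * legacy INTx line. Returns 0 on success, 1 on failure.
 */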
4011static int nv_request_irq(struct net_device *dev, int intr_test)
4012{
4013        struct fe_priv *np = get_nvpriv(dev);
4014        u8 __iomem *base = get_hwbase(dev);
4015        int ret;
4016        int i;
4017        irqreturn_t (*handler)(int foo, void *data);
4018
4019        if (intr_test) {
4020                handler = nv_nic_irq_test;
4021        } else {
4022                if (nv_optimized(np))
4023                        handler = nv_nic_irq_optimized;
4024                else
4025                        handler = nv_nic_irq;
4026        }
4027
4028        if (np->msi_flags & NV_MSI_X_CAPABLE) {
4029                for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4030                        np->msi_x_entry[i].entry = i;
4031                ret = pci_enable_msix_range(np->pci_dev,
4032                                            np->msi_x_entry,
4033                                            np->msi_flags & NV_MSI_X_VECTORS_MASK,
4034                                            np->msi_flags & NV_MSI_X_VECTORS_MASK);
4035                if (ret > 0) {
4036                        np->msi_flags |= NV_MSI_X_ENABLED;
4037                        if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
4038                                /* Request irq for rx handling */
4039                                sprintf(np->name_rx, "%s-rx", dev->name);
4040                                ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
4041                                                  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
4042                                if (ret) {
4043                                        netdev_info(dev,
4044                                                    "request_irq failed for rx %d\n",
4045                                                    ret);
4046                                        pci_disable_msix(np->pci_dev);
4047                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
4048                                        goto out_err;
4049                                }
4050                                /* Request irq for tx handling */
4051                                sprintf(np->name_tx, "%s-tx", dev->name);
4052                                ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
4053                                                  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
4054                                if (ret) {
4055                                        netdev_info(dev,
4056                                                    "request_irq failed for tx %d\n",
4057                                                    ret);
4058                                        pci_disable_msix(np->pci_dev);
4059                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
4060                                        goto out_free_rx;
4061                                }
4062                                /* Request irq for link and timer handling */
4063                                sprintf(np->name_other, "%s-other", dev->name);
4064                                ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
4065                                                  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
4066                                if (ret) {
4067                                        netdev_info(dev,
4068                                                    "request_irq failed for link %d\n",
4069                                                    ret);
4070                                        pci_disable_msix(np->pci_dev);
4071                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
4072                                        goto out_free_tx;
4073                                }
4074                                /* map interrupts to their respective vector */
4075                                writel(0, base + NvRegMSIXMap0);
4076                                writel(0, base + NvRegMSIXMap1);
4077                                set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
4078                                set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
4079                                set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
4080                        } else {
4081                                /* Request irq for all interrupts */
4082                                ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4083                                                  handler, IRQF_SHARED, dev->name, dev);
4084                                if (ret) {
4085                                        netdev_info(dev,
4086                                                    "request_irq failed %d\n",
4087                                                    ret);
4088                                        pci_disable_msix(np->pci_dev);
4089                                        np->msi_flags &= ~NV_MSI_X_ENABLED;
4090                                        goto out_err;
4091                                }
4092
4093                                /* map interrupts to vector 0 */
4094                                writel(0, base + NvRegMSIXMap0);
4095                                writel(0, base + NvRegMSIXMap1);
4096                        }
4097                        netdev_info(dev, "MSI-X enabled\n");
4098                        return 0;
4099                }
4100        }
4101        if (np->msi_flags & NV_MSI_CAPABLE) {
4102                ret = pci_enable_msi(np->pci_dev);
4103                if (ret == 0) {
4104                        np->msi_flags |= NV_MSI_ENABLED;
4105                        ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4106                        if (ret) {
4107                                netdev_info(dev, "request_irq failed %d\n",
4108                                            ret);
4109                                pci_disable_msi(np->pci_dev);
4110                                np->msi_flags &= ~NV_MSI_ENABLED;
4111                                goto out_err;
4112                        }
4113
4114                        /* map interrupts to vector 0 */
4115                        writel(0, base + NvRegMSIMap0);
4116                        writel(0, base + NvRegMSIMap1);
4117                        /* enable msi vector 0 */
4118                        writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4119                        netdev_info(dev, "MSI enabled\n");
4120                        return 0;
4121                }
4122        }
4123
4124        if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4125                goto out_err;
4126
4127        return 0;
4128out_free_tx:
4129        free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4130out_free_rx:
4131        free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4132out_err:
4133        return 1;
4134}
4135
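/* Undo nv_request_irq(): free every vector that was requested and switch
 * MSI-X/MSI back off.
 */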
4136static void nv_free_irq(struct net_device *dev)
4137{
4138        struct fe_priv *np = get_nvpriv(dev);
4139        int i;
4140
4141        if (np->msi_flags & NV_MSI_X_ENABLED) {
4142                for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4143                        free_irq(np->msi_x_entry[i].vector, dev);
4144                pci_disable_msix(np->pci_dev);
4145                np->msi_flags &= ~NV_MSI_X_ENABLED;
4146        } else {
4147                free_irq(np->pci_dev->irq, dev);
4148                if (np->msi_flags & NV_MSI_ENABLED) {
4149                        pci_disable_msi(np->pci_dev);
4150                        np->msi_flags &= ~NV_MSI_ENABLED;
4151                }
4152        }
4153}
4154
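/* Timer callback armed by the ISRs when they hit max_interrupt_work or see
 * a recoverable error: with the irq line disabled it re-runs the relevant
 * handler(s) by hand, after reinitializing the rx/tx engines if
 * recover_error was set. nv_poll_controller() reuses it for netpoll.
 */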
4155static void nv_do_nic_poll(struct timer_list *t)
4156{
4157        struct fe_priv *np = from_timer(np, t, nic_poll);
4158        struct net_device *dev = np->dev;
4159        u8 __iomem *base = get_hwbase(dev);
4160        u32 mask = 0;
4161        unsigned long flags;
4162        unsigned int irq = 0;
4163
4164        /*
4165         * First disable the irq line(s), then re-enable interrupts on the
4166         * nic; this must happen before calling nv_nic_irq, which may
4167         * decide to mask them again.
4168         */
4169
4170        if (!using_multi_irqs(dev)) {
4171                if (np->msi_flags & NV_MSI_X_ENABLED)
4172                        irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
4173                else
4174                        irq = np->pci_dev->irq;
4175                mask = np->irqmask;
4176        } else {
4177                if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4178                        irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
4179                        mask |= NVREG_IRQ_RX_ALL;
4180                }
4181                if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4182                        irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
4183                        mask |= NVREG_IRQ_TX_ALL;
4184                }
4185                if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4186                        irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
4187                        mask |= NVREG_IRQ_OTHER;
4188                }
4189        }
4190
4191        disable_irq_nosync_lockdep_irqsave(irq, &flags);
4192        synchronize_irq(irq);
4193
4194        if (np->recover_error) {
4195                np->recover_error = 0;
4196                netdev_info(dev, "MAC in recoverable error state\n");
4197                if (netif_running(dev)) {
4198                        netif_tx_lock_bh(dev);
4199                        netif_addr_lock(dev);
4200                        spin_lock(&np->lock);
4201                        /* stop engines */
4202                        nv_stop_rxtx(dev);
4203                        if (np->driver_data & DEV_HAS_POWER_CNTRL)
4204                                nv_mac_reset(dev);
4205                        nv_txrx_reset(dev);
4206                        /* drain rx and tx queues */
4207                        nv_drain_rxtx(dev);
4208                        /* reinit driver view of the rx queue */
4209                        set_bufsize(dev);
4210                        if (nv_init_ring(dev)) {
4211                                if (!np->in_shutdown)
4212                                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4213                        }
4214                        /* reinit nic view of the rx queue */
4215                        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4216                        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4217                        writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4218                                base + NvRegRingSizes);
4219                        pci_push(base);
4220                        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4221                        pci_push(base);
4222                        /* clear interrupts */
4223                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
4224                                writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4225                        else
4226                                writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4227
4228                        /* restart rx and tx engines */
4229                        nv_start_rxtx(dev);
4230                        spin_unlock(&np->lock);
4231                        netif_addr_unlock(dev);
4232                        netif_tx_unlock_bh(dev);
4233                }
4234        }
4235
4236        writel(mask, base + NvRegIrqMask);
4237        pci_push(base);
4238
4239        if (!using_multi_irqs(dev)) {
4240                np->nic_poll_irq = 0;
4241                if (nv_optimized(np))
4242                        nv_nic_irq_optimized(0, dev);
4243                else
4244                        nv_nic_irq(0, dev);
4245        } else {
4246                if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4247                        np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4248                        nv_nic_irq_rx(0, dev);
4249                }
4250                if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4251                        np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4252                        nv_nic_irq_tx(0, dev);
4253                }
4254                if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4255                        np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4256                        nv_nic_irq_other(0, dev);
4257                }
4258        }
4259
4260        enable_irq_lockdep_irqrestore(irq, &flags);
4261}
4262
4263#ifdef CONFIG_NET_POLL_CONTROLLER
4264static void nv_poll_controller(struct net_device *dev)
4265{
4266        struct fe_priv *np = netdev_priv(dev);
4267
4268        nv_do_nic_poll(&np->nic_poll);
4269}
4270#endif
4271
4272static void nv_do_stats_poll(struct timer_list *t)
4273        __acquires(&netdev_priv(dev)->hwstats_lock)
4274        __releases(&netdev_priv(dev)->hwstats_lock)
4275{
4276        struct fe_priv *np = from_timer(np, t, stats_poll);
4277        struct net_device *dev = np->dev;
4278
4279        /* If lock is currently taken, the stats are being refreshed
4280         * and hence fresh enough */
4281        if (spin_trylock(&np->hwstats_lock)) {
4282                nv_update_stats(dev);
4283                spin_unlock(&np->hwstats_lock);
4284        }
4285
4286        if (!np->in_shutdown)
4287                mod_timer(&np->stats_poll,
4288                        round_jiffies(jiffies + STATS_INTERVAL));
4289}
4290
4291static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4292{
4293        struct fe_priv *np = netdev_priv(dev);
4294        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
4295        strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4296        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4297}
4298
4299static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4300{
4301        struct fe_priv *np = netdev_priv(dev);
4302        wolinfo->supported = WAKE_MAGIC;
4303
4304        spin_lock_irq(&np->lock);
4305        if (np->wolenabled)
4306                wolinfo->wolopts = WAKE_MAGIC;
4307        spin_unlock_irq(&np->lock);
4308}
4309
4310static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4311{
4312        struct fe_priv *np = netdev_priv(dev);
4313        u8 __iomem *base = get_hwbase(dev);
4314        u32 flags = 0;
4315
4316        if (wolinfo->wolopts == 0) {
4317                np->wolenabled = 0;
4318        } else if (wolinfo->wolopts & WAKE_MAGIC) {
4319                np->wolenabled = 1;
4320                flags = NVREG_WAKEUPFLAGS_ENABLE;
4321        }
4322        if (netif_running(dev)) {
4323                spin_lock_irq(&np->lock);
4324                writel(flags, base + NvRegWakeUpFlags);
4325                spin_unlock_irq(&np->lock);
4326        }
4327        device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4328        return 0;
4329}
4330
4331static int nv_get_link_ksettings(struct net_device *dev,
4332                                 struct ethtool_link_ksettings *cmd)
4333{
4334        struct fe_priv *np = netdev_priv(dev);
4335        u32 speed, supported, advertising;
4336        int adv;
4337
4338        spin_lock_irq(&np->lock);
4339        cmd->base.port = PORT_MII;
4340        if (!netif_running(dev)) {
4341                /* We do not track link speed / duplex setting if the
4342                 * interface is disabled. Force a link check */
4343                if (nv_update_linkspeed(dev)) {
4344                        netif_carrier_on(dev);
4345                } else {
4346                        netif_carrier_off(dev);
4347                }
4348        }
4349
4350        if (netif_carrier_ok(dev)) {
4351                switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4352                case NVREG_LINKSPEED_10:
4353                        speed = SPEED_10;
4354                        break;
4355                case NVREG_LINKSPEED_100:
4356                        speed = SPEED_100;
4357                        break;
4358                case NVREG_LINKSPEED_1000:
4359                        speed = SPEED_1000;
4360                        break;
4361                default:
4362                        speed = SPEED_UNKNOWN;
4363                        break;
4364                }
4365                cmd->base.duplex = DUPLEX_HALF;
4366                if (np->duplex)
4367                        cmd->base.duplex = DUPLEX_FULL;
4368        } else {
4369                speed = SPEED_UNKNOWN;
4370                cmd->base.duplex = DUPLEX_UNKNOWN;
4371        }
4372        cmd->base.speed = speed;
4373        cmd->base.autoneg = np->autoneg;
4374
4375        advertising = ADVERTISED_MII;
4376        if (np->autoneg) {
4377                advertising |= ADVERTISED_Autoneg;
4378                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4379                if (adv & ADVERTISE_10HALF)
4380                        advertising |= ADVERTISED_10baseT_Half;
4381                if (adv & ADVERTISE_10FULL)
4382                        advertising |= ADVERTISED_10baseT_Full;
4383                if (adv & ADVERTISE_100HALF)
4384                        advertising |= ADVERTISED_100baseT_Half;
4385                if (adv & ADVERTISE_100FULL)
4386                        advertising |= ADVERTISED_100baseT_Full;
4387                if (np->gigabit == PHY_GIGABIT) {
4388                        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4389                        if (adv & ADVERTISE_1000FULL)
4390                                advertising |= ADVERTISED_1000baseT_Full;
4391                }
4392        }
4393        supported = (SUPPORTED_Autoneg |
4394                SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4395                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4396                SUPPORTED_MII);
4397        if (np->gigabit == PHY_GIGABIT)
4398                supported |= SUPPORTED_1000baseT_Full;
4399
4400        cmd->base.phy_address = np->phyaddr;
4401
4402        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4403                                                supported);
4404        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4405                                                advertising);
4406
4408        spin_unlock_irq(&np->lock);
4409        return 0;
4410}
4411
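/* ETHTOOL_SLINKSETTINGS handler; typically reached via something like
 * "ethtool -s ethX speed 100 duplex full autoneg off".
 */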
4412static int nv_set_link_ksettings(struct net_device *dev,
4413                                 const struct ethtool_link_ksettings *cmd)
4414{
4415        struct fe_priv *np = netdev_priv(dev);
4416        u32 speed = cmd->base.speed;
4417        u32 advertising;
4418
4419        ethtool_convert_link_mode_to_legacy_u32(&advertising,
4420                                                cmd->link_modes.advertising);
4421
4422        if (cmd->base.port != PORT_MII)
4423                return -EINVAL;
4424        if (cmd->base.phy_address != np->phyaddr) {
4425                /* TODO: support switching between multiple phys. Should be
4426                 * trivial, but not enabled due to lack of test hardware. */
4427                return -EINVAL;
4428        }
4429        if (cmd->base.autoneg == AUTONEG_ENABLE) {
4430                u32 mask;
4431
4432                mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4433                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4434                if (np->gigabit == PHY_GIGABIT)
4435                        mask |= ADVERTISED_1000baseT_Full;
4436
4437                if ((advertising & mask) == 0)
4438                        return -EINVAL;
4439
4440        } else if (cmd->base.autoneg == AUTONEG_DISABLE) {
4441                /* Note: forcing speed 1000 with autoneg disabled is
4442                 * intentionally forbidden - no one should need that. */
4443
4444                if (speed != SPEED_10 && speed != SPEED_100)
4445                        return -EINVAL;
4446                if (cmd->base.duplex != DUPLEX_HALF &&
4447                    cmd->base.duplex != DUPLEX_FULL)
4448                        return -EINVAL;
4449        } else {
4450                return -EINVAL;
4451        }
4452
4453        netif_carrier_off(dev);
4454        if (netif_running(dev)) {
4455                unsigned long flags;
4456
4457                nv_disable_irq(dev);
4458                netif_tx_lock_bh(dev);
4459                netif_addr_lock(dev);
4460                /* with plain spinlock lockdep complains */
4461                spin_lock_irqsave(&np->lock, flags);
4462                /* stop engines */
4463                /* FIXME:
4464                 * Stopping the engines can take a while, and interrupts
4465                 * are off because of spin_lock_irqsave; we rely on the
4466                 * settings not being changed very often. Worst case is
4467                 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX plus some
4468                 * minor delays - up to roughly one second.
4469                 */
4471                nv_stop_rxtx(dev);
4472                spin_unlock_irqrestore(&np->lock, flags);
4473                netif_addr_unlock(dev);
4474                netif_tx_unlock_bh(dev);
4475        }
4476
4477        if (cmd->base.autoneg == AUTONEG_ENABLE) {
4478                int adv, bmcr;
4479
4480                np->autoneg = 1;
4481
4482                /* advertise only what has been requested */
4483                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4484                adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4485                if (advertising & ADVERTISED_10baseT_Half)
4486                        adv |= ADVERTISE_10HALF;
4487                if (advertising & ADVERTISED_10baseT_Full)
4488                        adv |= ADVERTISE_10FULL;
4489                if (advertising & ADVERTISED_100baseT_Half)
4490                        adv |= ADVERTISE_100HALF;
4491                if (advertising & ADVERTISED_100baseT_Full)
4492                        adv |= ADVERTISE_100FULL;
4493                if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4494                        adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4495                if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4496                        adv |=  ADVERTISE_PAUSE_ASYM;
4497                mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4498
4499                if (np->gigabit == PHY_GIGABIT) {
4500                        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4501                        adv &= ~ADVERTISE_1000FULL;
4502                        if (advertising & ADVERTISED_1000baseT_Full)
4503                                adv |= ADVERTISE_1000FULL;
4504                        mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4505                }
4506
4507                if (netif_running(dev))
4508                        netdev_info(dev, "link down\n");
4509                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4510                if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4511                        bmcr |= BMCR_ANENABLE;
4512                        /* reset the phy in order for settings to stick,
4513                         * and cause autoneg to start */
4514                        if (phy_reset(dev, bmcr)) {
4515                                netdev_info(dev, "phy reset failed\n");
4516                                return -EINVAL;
4517                        }
4518                } else {
4519                        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4520                        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4521                }
4522        } else {
4523                int adv, bmcr;
4524
4525                np->autoneg = 0;
4526
4527                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4528                adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4529                if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
4530                        adv |= ADVERTISE_10HALF;
4531                if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
4532                        adv |= ADVERTISE_10FULL;
4533                if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
4534                        adv |= ADVERTISE_100HALF;
4535                if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
4536                        adv |= ADVERTISE_100FULL;
4537                np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4538                if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4539                        adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4540                        np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4541                }
4542                if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4543                        adv |=  ADVERTISE_PAUSE_ASYM;
4544                        np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4545                }
4546                mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4547                np->fixed_mode = adv;
4548
4549                if (np->gigabit == PHY_GIGABIT) {
4550                        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4551                        adv &= ~ADVERTISE_1000FULL;
4552                        mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4553                }
4554
4555                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4556                bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4557                if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4558                        bmcr |= BMCR_FULLDPLX;
4559                if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4560                        bmcr |= BMCR_SPEED100;
4561                if (np->phy_oui == PHY_OUI_MARVELL) {
4562                        /* reset the phy in order for forced mode settings to stick */
4563                        if (phy_reset(dev, bmcr)) {
4564                                netdev_info(dev, "phy reset failed\n");
4565                                return -EINVAL;
4566                        }
4567                } else {
4568                        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4569                        if (netif_running(dev)) {
4570                                /* Wait a bit and then reconfigure the nic. */
4571                                udelay(10);
4572                                nv_linkchange(dev);
4573                        }
4574                }
4575        }
4576
4577        if (netif_running(dev)) {
4578                nv_start_rxtx(dev);
4579                nv_enable_irq(dev);
4580        }
4581
4582        return 0;
4583}
4584
4585#define FORCEDETH_REGS_VER      1
4586
4587static int nv_get_regs_len(struct net_device *dev)
4588{
4589        struct fe_priv *np = netdev_priv(dev);
4590        return np->register_size;
4591}
4592
4593static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4594{
4595        struct fe_priv *np = netdev_priv(dev);
4596        u8 __iomem *base = get_hwbase(dev);
4597        u32 *rbuf = buf;
4598        int i;
4599
4600        regs->version = FORCEDETH_REGS_VER;
4601        spin_lock_irq(&np->lock);
4602        for (i = 0; i < np->register_size/sizeof(u32); i++)
4603                rbuf[i] = readl(base + i*sizeof(u32));
4604        spin_unlock_irq(&np->lock);
4605}
4606
4607static int nv_nway_reset(struct net_device *dev)
4608{
4609        struct fe_priv *np = netdev_priv(dev);
4610        int ret;
4611
4612        if (np->autoneg) {
4613                int bmcr;
4614
4615                netif_carrier_off(dev);
4616                if (netif_running(dev)) {
4617                        nv_disable_irq(dev);
4618                        netif_tx_lock_bh(dev);
4619                        netif_addr_lock(dev);
4620                        spin_lock(&np->lock);
4621                        /* stop engines */
4622                        nv_stop_rxtx(dev);
4623                        spin_unlock(&np->lock);
4624                        netif_addr_unlock(dev);
4625                        netif_tx_unlock_bh(dev);
4626                        netdev_info(dev, "link down\n");
4627                }
4628
4629                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4630                if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4631                        bmcr |= BMCR_ANENABLE;
4632                        /* reset the phy in order for settings to stick*/
4633                        if (phy_reset(dev, bmcr)) {
4634                                netdev_info(dev, "phy reset failed\n");
4635                                return -EINVAL;
4636                        }
4637                } else {
4638                        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4639                        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4640                }
4641
4642                if (netif_running(dev)) {
4643                        nv_start_rxtx(dev);
4644                        nv_enable_irq(dev);
4645                }
4646                ret = 0;
4647        } else {
4648                ret = -EINVAL;
4649        }
4650
4651        return ret;
4652}
4653
4654static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4655{
4656        struct fe_priv *np = netdev_priv(dev);
4657
4658        ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4659        ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4660
4661        ring->rx_pending = np->rx_ring_size;
4662        ring->tx_pending = np->tx_ring_size;
4663}
4664
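/* Resize the rx/tx rings (e.g. "ethtool -G ethX rx N tx N"): the new
 * descriptor rings and skb maps are allocated first, and only then are the
 * engines stopped and the rings swapped, so a failed allocation leaves the
 * old rings untouched.
 */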
4665static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4666{
4667        struct fe_priv *np = netdev_priv(dev);
4668        u8 __iomem *base = get_hwbase(dev);
4669        u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4670        dma_addr_t ring_addr;
4671
4672        if (ring->rx_pending < RX_RING_MIN ||
4673            ring->tx_pending < TX_RING_MIN ||
4674            ring->rx_mini_pending != 0 ||
4675            ring->rx_jumbo_pending != 0 ||
4676            (np->desc_ver == DESC_VER_1 &&
4677             (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4678              ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4679            (np->desc_ver != DESC_VER_1 &&
4680             (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4681              ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4682                return -EINVAL;
4683        }
4684
4685        /* allocate new rings */
4686        if (!nv_optimized(np)) {
4687                rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4688                                               sizeof(struct ring_desc) *
4689                                               (ring->rx_pending +
4690                                               ring->tx_pending),
4691                                               &ring_addr, GFP_ATOMIC);
4692        } else {
4693                rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4694                                               sizeof(struct ring_desc_ex) *
4695                                               (ring->rx_pending +
4696                                               ring->tx_pending),
4697                                               &ring_addr, GFP_ATOMIC);
4698        }
4699        rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
4700                                  GFP_KERNEL);
4701        tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
4702                                  GFP_KERNEL);
4703        if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4704                /* fall back to old rings */
4705                if (!nv_optimized(np)) {
4706                        if (rxtx_ring)
4707                                dma_free_coherent(&np->pci_dev->dev,
4708                                                  sizeof(struct ring_desc) *
4709                                                  (ring->rx_pending +
4710                                                  ring->tx_pending),
4711                                                  rxtx_ring, ring_addr);
4712                } else {
4713                        if (rxtx_ring)
4714                                dma_free_coherent(&np->pci_dev->dev,
4715                                                  sizeof(struct ring_desc_ex) *
4716                                                  (ring->rx_pending +
4717                                                  ring->tx_pending),
4718                                                  rxtx_ring, ring_addr);
4719                }
4720
4721                kfree(rx_skbuff);
4722                kfree(tx_skbuff);
4723                goto exit;
4724        }
4725
4726        if (netif_running(dev)) {
4727                nv_disable_irq(dev);
4728                nv_napi_disable(dev);
4729                netif_tx_lock_bh(dev);
4730                netif_addr_lock(dev);
4731                spin_lock(&np->lock);
4732                /* stop engines */
4733                nv_stop_rxtx(dev);
4734                nv_txrx_reset(dev);
4735                /* drain queues */
4736                nv_drain_rxtx(dev);
4737                /* delete queues */
4738                free_rings(dev);
4739        }
4740
4741        /* set new values */
4742        np->rx_ring_size = ring->rx_pending;
4743        np->tx_ring_size = ring->tx_pending;
4744
4745        if (!nv_optimized(np)) {
4746                np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4747                np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4748        } else {
4749                np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4750                np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4751        }
4752        np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4753        np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4754        np->ring_addr = ring_addr;
4755
4756        memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4757        memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4758
4759        if (netif_running(dev)) {
4760                /* reinit driver view of the queues */
4761                set_bufsize(dev);
4762                if (nv_init_ring(dev)) {
4763                        if (!np->in_shutdown)
4764                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4765                }
4766
4767                /* reinit nic view of the queues */
4768                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4769                setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4770                writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4771                        base + NvRegRingSizes);
4772                pci_push(base);
4773                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4774                pci_push(base);
4775
4776                /* restart engines */
4777                nv_start_rxtx(dev);
4778                spin_unlock(&np->lock);
4779                netif_addr_unlock(dev);
4780                netif_tx_unlock_bh(dev);
4781                nv_napi_enable(dev);
4782                nv_enable_irq(dev);
4783        }
4784        return 0;
4785exit:
4786        return -ENOMEM;
4787}
4788
4789static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4790{
4791        struct fe_priv *np = netdev_priv(dev);
4792
4793        pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4794        pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4795        pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4796}
4797
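/* Flow-control configuration (e.g. "ethtool -A ethX ..."): with autoneg
 * enabled the request is only advertised to the link partner, otherwise
 * pause handling is reprogrammed directly.
 */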
4798static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4799{
4800        struct fe_priv *np = netdev_priv(dev);
4801        int adv, bmcr;
4802
4803        if ((!np->autoneg && np->duplex == 0) ||
4804            (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4805                netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4806                return -EINVAL;
4807        }
4808        if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4809                netdev_info(dev, "hardware does not support tx pause frames\n");
4810                return -EINVAL;
4811        }
4812
4813        netif_carrier_off(dev);
4814        if (netif_running(dev)) {
4815                nv_disable_irq(dev);
4816                netif_tx_lock_bh(dev);
4817                netif_addr_lock(dev);
4818                spin_lock(&np->lock);
4819                /* stop engines */
4820                nv_stop_rxtx(dev);
4821                spin_unlock(&np->lock);
4822                netif_addr_unlock(dev);
4823                netif_tx_unlock_bh(dev);
4824        }
4825
4826        np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4827        if (pause->rx_pause)
4828                np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4829        if (pause->tx_pause)
4830                np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4831
4832        if (np->autoneg && pause->autoneg) {
4833                np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4834
4835                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4836                adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4837                if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4838                        adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4839                if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4840                        adv |=  ADVERTISE_PAUSE_ASYM;
4841                mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4842
4843                if (netif_running(dev))
4844                        netdev_info(dev, "link down\n");
4845                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4846                bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4847                mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4848        } else {
4849                np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4850                if (pause->rx_pause)
4851                        np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4852                if (pause->tx_pause)
4853                        np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4854
4855                if (!netif_running(dev))
4856                        nv_update_linkspeed(dev);
4857                else
4858                        nv_update_pause(dev, np->pause_flags);
4859        }
4860
4861        if (netif_running(dev)) {
4862                nv_start_rxtx(dev);
4863                nv_enable_irq(dev);
4864        }
4865        return 0;
4866}
4867
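/* Toggle internal PHY loopback, normally driven through the
 * NETIF_F_LOOPBACK feature flag (e.g. "ethtool -K ethX loopback on");
 * enabling it forces 1000 Mbps full duplex.
 */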
4868static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4869{
4870        struct fe_priv *np = netdev_priv(dev);
4871        unsigned long flags;
4872        u32 miicontrol;
4873        int err, retval = 0;
4874
4875        spin_lock_irqsave(&np->lock, flags);
4876        miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4877        if (features & NETIF_F_LOOPBACK) {
4878                if (miicontrol & BMCR_LOOPBACK) {
4879                        spin_unlock_irqrestore(&np->lock, flags);
4880                        netdev_info(dev, "Loopback already enabled\n");
4881                        return 0;
4882                }
4883                nv_disable_irq(dev);
4884                /* Turn on loopback mode */
4885                miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4886                err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4887                if (err) {
4888                        retval = PHY_ERROR;
4889                        spin_unlock_irqrestore(&np->lock, flags);
4890                        phy_init(dev);
4891                } else {
4892                        if (netif_running(dev)) {
4893                                /* Force 1000 Mbps full-duplex */
4894                                nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4895                                                   1);
4896                                /* Force link up */
4897                                netif_carrier_on(dev);
4898                        }
4899                        spin_unlock_irqrestore(&np->lock, flags);
4900                        netdev_info(dev,
4901                                "Internal PHY loopback mode enabled.\n");
4902                }
4903        } else {
4904                if (!(miicontrol & BMCR_LOOPBACK)) {
4905                        spin_unlock_irqrestore(&np->lock, flags);
4906                        netdev_info(dev, "Loopback already disabled\n");
4907                        return 0;
4908                }
4909                nv_disable_irq(dev);
4910                /* Turn off loopback */
4911                spin_unlock_irqrestore(&np->lock, flags);
4912                netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4913                phy_init(dev);
4914        }
4915        msleep(500);
4916        spin_lock_irqsave(&np->lock, flags);
4917        nv_enable_irq(dev);
4918        spin_unlock_irqrestore(&np->lock, flags);
4919
4920        return retval;
4921}
4922
4923static netdev_features_t nv_fix_features(struct net_device *dev,
4924        netdev_features_t features)
4925{
4926        /* vlan is dependent on rx checksum offload */
4927        if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4928                features |= NETIF_F_RXCSUM;
4929
4930        return features;
4931}
4932
4933static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4934{
4935        struct fe_priv *np = get_nvpriv(dev);
4936
4937        spin_lock_irq(&np->lock);
4938
4939        if (features & NETIF_F_HW_VLAN_CTAG_RX)
4940                np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4941        else
4942                np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4943
4944        if (features & NETIF_F_HW_VLAN_CTAG_TX)
4945                np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4946        else
4947                np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4948
4949        writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4950
4951        spin_unlock_irq(&np->lock);
4952}
4953
4954static int nv_set_features(struct net_device *dev, netdev_features_t features)
4955{
4956        struct fe_priv *np = netdev_priv(dev);
4957        u8 __iomem *base = get_hwbase(dev);
4958        netdev_features_t changed = dev->features ^ features;
4959        int retval;
4960
4961        if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4962                retval = nv_set_loopback(dev, features);
4963                if (retval != 0)
4964                        return retval;
4965        }
4966
4967        if (changed & NETIF_F_RXCSUM) {
4968                spin_lock_irq(&np->lock);
4969
4970                if (features & NETIF_F_RXCSUM)
4971                        np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4972                else
4973                        np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4974
4975                if (netif_running(dev))
4976                        writel(np->txrxctl_bits, base + NvRegTxRxControl);
4977
4978                spin_unlock_irq(&np->lock);
4979        }
4980
4981        if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4982                nv_vlan_mode(dev, features);
4983
4984        return 0;
4985}
4986
4987static int nv_get_sset_count(struct net_device *dev, int sset)
4988{
4989        struct fe_priv *np = netdev_priv(dev);
4990
4991        switch (sset) {
4992        case ETH_SS_TEST:
4993                if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4994                        return NV_TEST_COUNT_EXTENDED;
4995                else
4996                        return NV_TEST_COUNT_BASE;
4997        case ETH_SS_STATS:
4998                if (np->driver_data & DEV_HAS_STATISTICS_V3)
4999                        return NV_DEV_STATISTICS_V3_COUNT;
5000                else if (np->driver_data & DEV_HAS_STATISTICS_V2)
5001                        return NV_DEV_STATISTICS_V2_COUNT;
5002                else if (np->driver_data & DEV_HAS_STATISTICS_V1)
5003                        return NV_DEV_STATISTICS_V1_COUNT;
5004                else
5005                        return 0;
5006        default:
5007                return -EOPNOTSUPP;
5008        }
5009}
5010
5011static void nv_get_ethtool_stats(struct net_device *dev,
5012                                 struct ethtool_stats *estats, u64 *buffer)
5013        __acquires(&netdev_priv(dev)->hwstats_lock)
5014        __releases(&netdev_priv(dev)->hwstats_lock)
5015{
5016        struct fe_priv *np = netdev_priv(dev);
5017
5018        spin_lock_bh(&np->hwstats_lock);
5019        nv_update_stats(dev);
5020        memcpy(buffer, &np->estats,
5021               nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
5022        spin_unlock_bh(&np->hwstats_lock);
5023}
5024
5025static int nv_link_test(struct net_device *dev)
5026{
5027        struct fe_priv *np = netdev_priv(dev);
5028        int mii_status;
5029
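        /* BMSR latches link-down events: read it twice so that the second
         * read returns the current link state.
         */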
5030        mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5031        mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5032
5033        /* check phy link status */
5034        if (!(mii_status & BMSR_LSTATUS))
5035                return 0;
5036        else
5037                return 1;
5038}
5039
5040static int nv_register_test(struct net_device *dev)
5041{
5042        u8 __iomem *base = get_hwbase(dev);
5043        int i = 0;
5044        u32 orig_read, new_read;
5045
5046        do {
5047                orig_read = readl(base + nv_registers_test[i].reg);
5048
5049                /* xor with mask to toggle bits */
5050                orig_read ^= nv_registers_test[i].mask;
5051
5052                writel(orig_read, base + nv_registers_test[i].reg);
5053
5054                new_read = readl(base + nv_registers_test[i].reg);
5055
5056                if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
5057                        return 0;
5058
5059                /* restore original value */
5060                orig_read ^= nv_registers_test[i].mask;
5061                writel(orig_read, base + nv_registers_test[i].reg);
5062
5063        } while (nv_registers_test[++i].reg != 0);
5064
5065        return 1;
5066}
5067
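/* Self-test helper: install the test ISR, generate a timer interrupt and
 * check that np->intr_test got set. Returns 1 on pass, 2 if no interrupt
 * arrived, 0 if the test irq could not be set up or restored.
 */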
5068static int nv_interrupt_test(struct net_device *dev)
5069{
5070        struct fe_priv *np = netdev_priv(dev);
5071        u8 __iomem *base = get_hwbase(dev);
5072        int ret = 1;
5073        int testcnt;
5074        u32 save_msi_flags, save_poll_interval = 0;
5075
5076        if (netif_running(dev)) {
5077                /* free current irq */
5078                nv_free_irq(dev);
5079                save_poll_interval = readl(base+NvRegPollingInterval);
5080        }
5081
5082        /* flag to test interrupt handler */
5083        np->intr_test = 0;
5084
5085        /* setup test irq */
5086        save_msi_flags = np->msi_flags;
5087        np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
5088        np->msi_flags |= 0x001; /* setup 1 vector */
5089        if (nv_request_irq(dev, 1))
5090                return 0;
5091
5092        /* setup timer interrupt */
5093        writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5094        writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5095
5096        nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5097
5098        /* wait for at least one interrupt */
5099        msleep(100);
5100
5101        spin_lock_irq(&np->lock);
5102
5103        /* flag should be set within ISR */
5104        testcnt = np->intr_test;
5105        if (!testcnt)
5106                ret = 2;
5107
5108        nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5109        if (!(np->msi_flags & NV_MSI_X_ENABLED))
5110                writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5111        else
5112                writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5113
5114        spin_unlock_irq(&np->lock);
5115
5116        nv_free_irq(dev);
5117
5118        np->msi_flags = save_msi_flags;
5119
5120        if (netif_running(dev)) {
5121                writel(save_poll_interval, base + NvRegPollingInterval);
5122                writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5123                /* restore original irq */
5124                if (nv_request_irq(dev, 0))
5125                        return 0;
5126        }
5127
5128        return ret;
5129}
5130
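/* Offline loopback self-test: put the MAC into loopback via the packet
 * filter flags, transmit one ETH_DATA_LEN frame filled with a known byte
 * pattern and verify it comes back intact on the rx ring. Returns 1 on
 * pass, 0 on failure.
 */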
5131static int nv_loopback_test(struct net_device *dev)
5132{
5133        struct fe_priv *np = netdev_priv(dev);
5134        u8 __iomem *base = get_hwbase(dev);
5135        struct sk_buff *tx_skb, *rx_skb;
5136        dma_addr_t test_dma_addr;
5137        u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5138        u32 flags;
5139        int len, i, pkt_len;
5140        u8 *pkt_data;
5141        u32 filter_flags = 0;
5142        u32 misc1_flags = 0;
5143        int ret = 1;
5144
5145        if (netif_running(dev)) {
5146                nv_disable_irq(dev);
5147                filter_flags = readl(base + NvRegPacketFilterFlags);
5148                misc1_flags = readl(base + NvRegMisc1);
5149        } else {
5150                nv_txrx_reset(dev);
5151        }
5152
5153        /* reinit driver view of the rx queue */
5154        set_bufsize(dev);
5155        nv_init_ring(dev);
5156
5157        /* setup hardware for loopback */
5158        writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5159        writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5160
5161        /* reinit nic view of the rx queue */
5162        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5163        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5164        writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5165                base + NvRegRingSizes);
5166        pci_push(base);
5167
5168        /* restart rx and tx engines */
5169        nv_start_rxtx(dev);
5170
5171        /* setup packet for tx */
5172        pkt_len = ETH_DATA_LEN;
5173        tx_skb = netdev_alloc_skb(dev, pkt_len);
5174        if (!tx_skb) {
5175                ret = 0;
5176                goto out;
5177        }
5178        test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
5179                                       skb_tailroom(tx_skb),
5180                                       DMA_TO_DEVICE);
5181        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
5182                                       test_dma_addr))) {
5183                dev_kfree_skb_any(tx_skb);
                ret = 0;
5184                goto out;
5185        }
5186        pkt_data = skb_put(tx_skb, pkt_len);
5187        for (i = 0; i < pkt_len; i++)
5188                pkt_data[i] = (u8)(i & 0xff);
5189
5190        if (!nv_optimized(np)) {
5191                np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5192                np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5193        } else {
5194                np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5195                np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5196                np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5197        }
5198        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5199        pci_push(get_hwbase(dev));
5200
5201        msleep(500);
5202
5203        /* check for rx of the packet */
5204        if (!nv_optimized(np)) {
5205                flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5206                len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5207
5208        } else {
5209                flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5210                len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5211        }
5212
5213        if (flags & NV_RX_AVAIL) {
5214                ret = 0;
5215        } else if (np->desc_ver == DESC_VER_1) {
5216                if (flags & NV_RX_ERROR)
5217                        ret = 0;
5218        } else {
5219                if (flags & NV_RX2_ERROR)
5220                        ret = 0;
5221        }
5222
5223        if (ret) {
5224                if (len != pkt_len) {
5225                        ret = 0;
5226                } else {
5227                        rx_skb = np->rx_skb[0].skb;
5228                        for (i = 0; i < pkt_len; i++) {
5229                                if (rx_skb->data[i] != (u8)(i & 0xff)) {
5230                                        ret = 0;
5231                                        break;
5232                                }
5233                        }
5234                }
5235        }
5236
5237        dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
5238                         (skb_end_pointer(tx_skb) - tx_skb->data),
5239                         DMA_TO_DEVICE);
5240        dev_kfree_skb_any(tx_skb);
5241 out:
5242        /* stop engines */
5243        nv_stop_rxtx(dev);
5244        nv_txrx_reset(dev);
5245        /* drain rx and tx queues */
5246        nv_drain_rxtx(dev);
5247
5248        if (netif_running(dev)) {
5249                writel(misc1_flags, base + NvRegMisc1);
5250                writel(filter_flags, base + NvRegPacketFilterFlags);
5251                nv_enable_irq(dev);
5252        }
5253
5254        return ret;
5255}
5256
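/* ethtool self-test entry point (e.g. "ethtool -t ethX offline"):
 * buffer[0..3] report the link, register, interrupt and loopback tests;
 * everything past the link test runs only in offline mode, and loopback
 * only if the device supports the extended test set.
 */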
5257static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5258{
5259        struct fe_priv *np = netdev_priv(dev);
5260        u8 __iomem *base = get_hwbase(dev);
5261        int result, count;
5262
5263        count = nv_get_sset_count(dev, ETH_SS_TEST);
5264        memset(buffer, 0, count * sizeof(u64));
5265
5266        if (!nv_link_test(dev)) {
5267                test->flags |= ETH_TEST_FL_FAILED;
5268                buffer[0] = 1;
5269        }
5270
5271        if (test->flags & ETH_TEST_FL_OFFLINE) {
5272                if (netif_running(dev)) {
5273                        netif_stop_queue(dev);
5274                        nv_napi_disable(dev);
5275                        netif_tx_lock_bh(dev);
5276                        netif_addr_lock(dev);
5277                        spin_lock_irq(&np->lock);
5278                        nv_disable_hw_interrupts(dev, np->irqmask);
5279                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
5280                                writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5281                        else
5282                                writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5283                        /* stop engines */
5284                        nv_stop_rxtx(dev);
5285                        nv_txrx_reset(dev);
5286                        /* drain rx queue */
5287                        nv_drain_rxtx(dev);
5288                        spin_unlock_irq(&np->lock);
5289                        netif_addr_unlock(dev);
5290                        netif_tx_unlock_bh(dev);
5291                }
5292
5293                if (!nv_register_test(dev)) {
5294                        test->flags |= ETH_TEST_FL_FAILED;
5295                        buffer[1] = 1;
5296                }
5297
5298                result = nv_interrupt_test(dev);
5299                if (result != 1) {
5300                        test->flags |= ETH_TEST_FL_FAILED;
5301                        buffer[2] = 1;
5302                }
5303                if (result == 0) {
5304                        /* bail out */
5305                        return;
5306                }
5307
5308                if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5309                        test->flags |= ETH_TEST_FL_FAILED;
5310                        buffer[3] = 1;
5311                }
5312
5313                if (netif_running(dev)) {
5314                        /* reinit driver view of the rx queue */
5315                        set_bufsize(dev);
5316                        if (nv_init_ring(dev)) {
5317                                if (!np->in_shutdown)
5318                                        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5319                        }
5320                        /* reinit nic view of the rx queue */
5321                        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5322                        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5323                        writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5324                                base + NvRegRingSizes);
5325                        pci_push(base);
5326                        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5327                        pci_push(base);
5328                        /* restart rx engine */
5329                        nv_start_rxtx(dev);
5330                        netif_start_queue(dev);
5331                        nv_napi_enable(dev);
5332                        nv_enable_hw_interrupts(dev, np->irqmask);
5333                }
5334        }
5335}
5336
5337static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5338{
5339        switch (stringset) {
5340        case ETH_SS_STATS:
5341                memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5342                break;
5343        case ETH_SS_TEST:
5344                memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5345                break;
5346        }
5347}
5348
5349static const struct ethtool_ops ops = {
5350        .get_drvinfo = nv_get_drvinfo,
5351        .get_link = ethtool_op_get_link,
5352        .get_wol = nv_get_wol,
5353        .set_wol = nv_set_wol,
5354        .get_regs_len = nv_get_regs_len,
5355        .get_regs = nv_get_regs,
5356        .nway_reset = nv_nway_reset,
5357        .get_ringparam = nv_get_ringparam,
5358        .set_ringparam = nv_set_ringparam,
5359        .get_pauseparam = nv_get_pauseparam,
5360        .set_pauseparam = nv_set_pauseparam,
5361        .get_strings = nv_get_strings,
5362        .get_ethtool_stats = nv_get_ethtool_stats,
5363        .get_sset_count = nv_get_sset_count,
5364        .self_test = nv_self_test,
5365        .get_ts_info = ethtool_op_get_ts_info,
5366        .get_link_ksettings = nv_get_link_ksettings,
5367        .set_link_ksettings = nv_set_link_ksettings,
5368};
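    /* The self-test above can be driven from userspace; e.g.
     * "ethtool -t eth0 offline" exercises .self_test with
     * ETH_TEST_FL_OFFLINE set (interface name is illustrative).
     */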
5369
5370/* The mgmt unit and driver use a semaphore to access the phy during init */
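    /* Protocol (as implemented below): poll for up to ~5 seconds for the
     * management unit to release its semaphore, then set the host
     * semaphore bit and read it back to verify that we really own it
     * while the mgmt semaphore is still free.
     */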
5371static int nv_mgmt_acquire_sema(struct net_device *dev)
5372{
5373        struct fe_priv *np = netdev_priv(dev);
5374        u8 __iomem *base = get_hwbase(dev);
5375        int i;
5376        u32 tx_ctrl, mgmt_sema;
5377
5378        for (i = 0; i < 10; i++) {
5379                mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5380                if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5381                        break;
5382                msleep(500);
5383        }
5384
5385        if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5386                return 0;
5387
5388        for (i = 0; i < 2; i++) {
5389                tx_ctrl = readl(base + NvRegTransmitterControl);
5390                tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5391                writel(tx_ctrl, base + NvRegTransmitterControl);
5392
5393                /* verify that semaphore was acquired */
5394                tx_ctrl = readl(base + NvRegTransmitterControl);
5395                if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5396                    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5397                        np->mgmt_sema = 1;
5398                        return 1;
5399                } else
5400                        udelay(50);
5401        }
5402
5403        return 0;
5404}
5405
5406static void nv_mgmt_release_sema(struct net_device *dev)
5407{
5408        struct fe_priv *np = netdev_priv(dev);
5409        u8 __iomem *base = get_hwbase(dev);
5410        u32 tx_ctrl;
5411
5412        if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5413                if (np->mgmt_sema) {
5414                        tx_ctrl = readl(base + NvRegTransmitterControl);
5415                        tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5416                        writel(tx_ctrl, base + NvRegTransmitterControl);
5417                }
5418        }
5419}
5420
5421
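    /* Query the management unit firmware version: issue the GETVERSION
     * command, toggle NVREG_XMITCTL_DATA_START, and wait up to five
     * seconds for the hardware to toggle NVREG_XMITCTL_DATA_READY
     * before reading NvRegMgmtUnitVersion.
     */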
5422static int nv_mgmt_get_version(struct net_device *dev)
5423{
5424        struct fe_priv *np = netdev_priv(dev);
5425        u8 __iomem *base = get_hwbase(dev);
5426        u32 data_ready = readl(base + NvRegTransmitterControl);
5427        u32 data_ready2 = 0;
5428        unsigned long start;
5429        int ready = 0;
5430
5431        writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5432        writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5433        start = jiffies;
5434        while (time_before(jiffies, start + 5*HZ)) {
5435                data_ready2 = readl(base + NvRegTransmitterControl);
5436                if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5437                        ready = 1;
5438                        break;
5439                }
5440                schedule_timeout_uninterruptible(1);
5441        }
5442
5443        if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5444                return 0;
5445
5446        np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5447
5448        return 1;
5449}
5450
5451static int nv_open(struct net_device *dev)
5452{
5453        struct fe_priv *np = netdev_priv(dev);
5454        u8 __iomem *base = get_hwbase(dev);
5455        int ret = 1;
5456        int oom, i;
5457        u32 low;
5458
5459        /* power up phy */
5460        mii_rw(dev, np->phyaddr, MII_BMCR,
5461               mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5462
5463        nv_txrx_gate(dev, false);
5464        /* erase previous misconfiguration */
5465        if (np->driver_data & DEV_HAS_POWER_CNTRL)
5466                nv_mac_reset(dev);
5467        writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5468        writel(0, base + NvRegMulticastAddrB);
5469        writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5470        writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5471        writel(0, base + NvRegPacketFilterFlags);
5472
5473        writel(0, base + NvRegTransmitterControl);
5474        writel(0, base + NvRegReceiverControl);
5475
5476        writel(0, base + NvRegAdapterControl);
5477
5478        if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5479                writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5480
5481        /* initialize descriptor rings */
5482        set_bufsize(dev);
5483        oom = nv_init_ring(dev);
5484
5485        writel(0, base + NvRegLinkSpeed);
5486        writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5487        nv_txrx_reset(dev);
5488        writel(0, base + NvRegUnknownSetupReg6);
5489
5490        np->in_shutdown = 0;
5491
5492        /* give hw rings */
5493        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5494        writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5495                base + NvRegRingSizes);
5496
5497        writel(np->linkspeed, base + NvRegLinkSpeed);
5498        if (np->desc_ver == DESC_VER_1)
5499                writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5500        else
5501                writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5502        writel(np->txrxctl_bits, base + NvRegTxRxControl);
5503        writel(np->vlanctl_bits, base + NvRegVlanControl);
5504        pci_push(base);
5505        writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5506        if (reg_delay(dev, NvRegUnknownSetupReg5,
5507                      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5508                      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5509                netdev_info(dev,
5510                            "%s: SetupReg5, Bit 31 remained off\n", __func__);
5511
5512        writel(0, base + NvRegMIIMask);
5513        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5514        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5515
5516        writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5517        writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5518        writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5519        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5520
5521        writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5522
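            /* Seed the slot time with random low bits, so that several nics
             * on the same segment do not run their collision backoff in
             * lockstep.
             */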
5523        get_random_bytes(&low, sizeof(low));
5524        low &= NVREG_SLOTTIME_MASK;
5525        if (np->desc_ver == DESC_VER_1) {
5526                writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5527        } else {
5528                if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5529                        /* setup legacy backoff */
5530                        writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5531                } else {
5532                        writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5533                        nv_gear_backoff_reseed(dev);
5534                }
5535        }
5536        writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5537        writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5538        if (poll_interval == -1) {
5539                if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5540                        writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5541                else
5542                        writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5543        } else
5544                writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5545        writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5546        writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5547                        base + NvRegAdapterControl);
5548        writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5549        writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5550        if (np->wolenabled)
5551                writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5552
5553        i = readl(base + NvRegPowerState);
5554        if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5555                writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5556
5557        pci_push(base);
5558        udelay(10);
5559        writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5560
5561        nv_disable_hw_interrupts(dev, np->irqmask);
5562        pci_push(base);
5563        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5564        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5565        pci_push(base);
5566
5567        if (nv_request_irq(dev, 0))
5568                goto out_drain;
5569
5570        /* ask for interrupts */
5571        nv_enable_hw_interrupts(dev, np->irqmask);
5572
5573        spin_lock_irq(&np->lock);
5574        writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5575        writel(0, base + NvRegMulticastAddrB);
5576        writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5577        writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5578        writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5579        /* One manual link speed update: Interrupts are enabled, future link
5580         * speed changes cause interrupts and are handled by nv_link_irq().
5581         */
5582        readl(base + NvRegMIIStatus);
5583        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5584
5585        /* set linkspeed to an invalid value, thus forcing
5586         * nv_update_linkspeed to init the hw */
5587        np->linkspeed = 0;
5588        ret = nv_update_linkspeed(dev);
5589        nv_start_rxtx(dev);
5590        netif_start_queue(dev);
5591        nv_napi_enable(dev);
5592
5593        if (ret) {
5594                netif_carrier_on(dev);
5595        } else {
5596                netdev_info(dev, "no link during initialization\n");
5597                netif_carrier_off(dev);
5598        }
5599        if (oom)
5600                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5601
5602        /* start statistics timer */
5603        if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5604                mod_timer(&np->stats_poll,
5605                        round_jiffies(jiffies + STATS_INTERVAL));
5606
5607        spin_unlock_irq(&np->lock);
5608
5609        /* If the loopback feature was set while the device was down, make sure
5610         * that it's set correctly now.
5611         */
5612        if (dev->features & NETIF_F_LOOPBACK)
5613                nv_set_loopback(dev, dev->features);
5614
5615        return 0;
5616out_drain:
5617        nv_drain_rxtx(dev);
5618        return ret;
5619}
5620
5621static int nv_close(struct net_device *dev)
5622{
5623        struct fe_priv *np = netdev_priv(dev);
5624        u8 __iomem *base;
5625
5626        spin_lock_irq(&np->lock);
5627        np->in_shutdown = 1;
5628        spin_unlock_irq(&np->lock);
5629        nv_napi_disable(dev);
5630        synchronize_irq(np->pci_dev->irq);
5631
5632        del_timer_sync(&np->oom_kick);
5633        del_timer_sync(&np->nic_poll);
5634        del_timer_sync(&np->stats_poll);
5635
5636        netif_stop_queue(dev);
5637        spin_lock_irq(&np->lock);
5638        nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5639        nv_stop_rxtx(dev);
5640        nv_txrx_reset(dev);
5641
5642        /* disable interrupts on the nic or we will lock up */
5643        base = get_hwbase(dev);
5644        nv_disable_hw_interrupts(dev, np->irqmask);
5645        pci_push(base);
5646
5647        spin_unlock_irq(&np->lock);
5648
5649        nv_free_irq(dev);
5650
5651        nv_drain_rxtx(dev);
5652
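            /* If WoL is armed (or the phy must stay powered), keep the
             * receiver listening for the wakeup packet; otherwise power the
             * phy down and gate the txrx clocks to save power.
             */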
5653        if (np->wolenabled || !phy_power_down) {
5654                nv_txrx_gate(dev, false);
5655                writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5656                nv_start_rx(dev);
5657        } else {
5658                /* power down phy */
5659                mii_rw(dev, np->phyaddr, MII_BMCR,
5660                       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5661                nv_txrx_gate(dev, true);
5662        }
5663
5664        /* FIXME: power down nic */
5665
5666        return 0;
5667}
5668
5669static const struct net_device_ops nv_netdev_ops = {
5670        .ndo_open               = nv_open,
5671        .ndo_stop               = nv_close,
5672        .ndo_get_stats64        = nv_get_stats64,
5673        .ndo_start_xmit         = nv_start_xmit,
5674        .ndo_tx_timeout         = nv_tx_timeout,
5675        .ndo_change_mtu         = nv_change_mtu,
5676        .ndo_fix_features       = nv_fix_features,
5677        .ndo_set_features       = nv_set_features,
5678        .ndo_validate_addr      = eth_validate_addr,
5679        .ndo_set_mac_address    = nv_set_mac_address,
5680        .ndo_set_rx_mode        = nv_set_multicast,
5681#ifdef CONFIG_NET_POLL_CONTROLLER
5682        .ndo_poll_controller    = nv_poll_controller,
5683#endif
5684};
5685
5686static const struct net_device_ops nv_netdev_ops_optimized = {
5687        .ndo_open               = nv_open,
5688        .ndo_stop               = nv_close,
5689        .ndo_get_stats64        = nv_get_stats64,
5690        .ndo_start_xmit         = nv_start_xmit_optimized,
5691        .ndo_tx_timeout         = nv_tx_timeout,
5692        .ndo_change_mtu         = nv_change_mtu,
5693        .ndo_fix_features       = nv_fix_features,
5694        .ndo_set_features       = nv_set_features,
5695        .ndo_validate_addr      = eth_validate_addr,
5696        .ndo_set_mac_address    = nv_set_mac_address,
5697        .ndo_set_rx_mode        = nv_set_multicast,
5698#ifdef CONFIG_NET_POLL_CONTROLLER
5699        .ndo_poll_controller    = nv_poll_controller,
5700#endif
5701};
5702
5703static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5704{
5705        struct net_device *dev;
5706        struct fe_priv *np;
5707        unsigned long addr;
5708        u8 __iomem *base;
5709        int err, i;
5710        u32 powerstate, txreg;
5711        u32 phystate_orig = 0, phystate;
5712        int phyinitialized = 0;
5713        static int printed_version;
5714
5715        if (!printed_version++)
5716                pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5717                        FORCEDETH_VERSION);
5718
5719        dev = alloc_etherdev(sizeof(struct fe_priv));
5720        err = -ENOMEM;
5721        if (!dev)
5722                goto out;
5723
5724        np = netdev_priv(dev);
5725        np->dev = dev;
5726        np->pci_dev = pci_dev;
5727        spin_lock_init(&np->lock);
5728        spin_lock_init(&np->hwstats_lock);
5729        SET_NETDEV_DEV(dev, &pci_dev->dev);
5730        u64_stats_init(&np->swstats_rx_syncp);
5731        u64_stats_init(&np->swstats_tx_syncp);
5732        np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
5733        if (!np->txrx_stats) {
5734                pr_err("failed to allocate per-cpu txrx_stats\n");
5735                err = -ENOMEM;
5736                goto out_alloc_percpu;
5737        }
5738
5739        timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5740        timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
5741        timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
5742
5743        err = pci_enable_device(pci_dev);
5744        if (err)
5745                goto out_free;
5746
5747        pci_set_master(pci_dev);
5748
5749        err = pci_request_regions(pci_dev, DRV_NAME);
5750        if (err < 0)
5751                goto out_disable;
5752
5753        if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5754                np->register_size = NV_PCI_REGSZ_VER3;
5755        else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5756                np->register_size = NV_PCI_REGSZ_VER2;
5757        else
5758                np->register_size = NV_PCI_REGSZ_VER1;
5759
5760        err = -EINVAL;
5761        addr = 0;
5762        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5763                if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5764                                pci_resource_len(pci_dev, i) >= np->register_size) {
5765                        addr = pci_resource_start(pci_dev, i);
5766                        break;
5767                }
5768        }
5769        if (i == DEVICE_COUNT_RESOURCE) {
5770                dev_info(&pci_dev->dev, "Couldn't find register window\n");
5771                goto out_relreg;
5772        }
5773
5774        /* copy of driver data */
5775        np->driver_data = id->driver_data;
5776        /* copy of device id */
5777        np->device_id = id->device;
5778
5779        /* handle different descriptor versions */
5780        if (id->driver_data & DEV_HAS_HIGH_DMA) {
5781                /* packet format 3: supports 40-bit addressing */
5782                np->desc_ver = DESC_VER_3;
5783                np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5784                if (dma_64bit) {
5785                        if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39)))
5786                                dev_info(&pci_dev->dev,
5787                                         "64-bit DMA failed, using 32-bit addressing\n");
5788                        else
5789                                dev->features |= NETIF_F_HIGHDMA;
5790                }
5791        } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5792                /* packet format 2: supports jumbo frames */
5793                np->desc_ver = DESC_VER_2;
5794                np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5795        } else {
5796                /* original packet format */
5797                np->desc_ver = DESC_VER_1;
5798                np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5799        }
5800
5801        np->pkt_limit = NV_PKTLIMIT_1;
5802        if (id->driver_data & DEV_HAS_LARGEDESC)
5803                np->pkt_limit = NV_PKTLIMIT_2;
5804
5805        if (id->driver_data & DEV_HAS_CHECKSUM) {
5806                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5807                dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5808                        NETIF_F_TSO | NETIF_F_RXCSUM;
5809        }
5810
5811        np->vlanctl_bits = 0;
5812        if (id->driver_data & DEV_HAS_VLAN) {
5813                np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5814                dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5815                                    NETIF_F_HW_VLAN_CTAG_TX;
5816        }
5817
5818        dev->features |= dev->hw_features;
5819
5820        /* Add loopback capability to the device. */
5821        dev->hw_features |= NETIF_F_LOOPBACK;
5822
5823        /* MTU range: 64 - 1500 or 9100 */
5824        dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
5825        dev->max_mtu = np->pkt_limit;
5826
5827        np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5828        if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5829            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5830            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5831                np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5832        }
5833
5834        err = -ENOMEM;
5835        np->base = ioremap(addr, np->register_size);
5836        if (!np->base)
5837                goto out_relreg;
5838
5839        np->rx_ring_size = RX_RING_DEFAULT;
5840        np->tx_ring_size = TX_RING_DEFAULT;
5841
5842        if (!nv_optimized(np)) {
5843                np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
5844                                                      sizeof(struct ring_desc) *
5845                                                      (np->rx_ring_size +
5846                                                      np->tx_ring_size),
5847                                                      &np->ring_addr,
5848                                                      GFP_KERNEL);
5849                if (!np->rx_ring.orig)
5850                        goto out_unmap;
5851                np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5852        } else {
5853                np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
5854                                                    sizeof(struct ring_desc_ex) *
5855                                                    (np->rx_ring_size +
5856                                                    np->tx_ring_size),
5857                                                    &np->ring_addr, GFP_KERNEL);
5858                if (!np->rx_ring.ex)
5859                        goto out_unmap;
5860                np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5861        }
5862        np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5863        np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5864        if (!np->rx_skb || !np->tx_skb)
5865                goto out_freering;
5866
5867        if (!nv_optimized(np))
5868                dev->netdev_ops = &nv_netdev_ops;
5869        else
5870                dev->netdev_ops = &nv_netdev_ops_optimized;
5871
5872        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5873        dev->ethtool_ops = &ops;
5874        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5875
5876        pci_set_drvdata(pci_dev, dev);
5877
5878        /* read the mac address */
5879        base = get_hwbase(dev);
5880        np->orig_mac[0] = readl(base + NvRegMacAddrA);
5881        np->orig_mac[1] = readl(base + NvRegMacAddrB);
5882
5883        /* check the workaround bit for correct mac address order */
5884        txreg = readl(base + NvRegTransmitPoll);
5885        if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5886                /* mac address is already in correct order */
5887                dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5888                dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5889                dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5890                dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5891                dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5892                dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5893        } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5894                /* workaround bit is set: mac address is already in correct order */
5895                dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5896                dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5897                dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5898                dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5899                dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5900                dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5901                /*
5902                 * Set orig mac address back to the reversed version.
5903                 * This flag will be cleared during low power transition.
5904                 * Therefore, we should always put back the reversed address.
5905                 */
5906                np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5907                        (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5908                np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5909        } else {
5910                /* need to reverse mac address to correct order */
5911                dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5912                dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5913                dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5914                dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5915                dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5916                dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5917                writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5918                dev_dbg(&pci_dev->dev,
5919                        "%s: set workaround bit for reversed mac addr\n",
5920                        __func__);
5921        }
5922
5923        if (!is_valid_ether_addr(dev->dev_addr)) {
5924                /*
5925                 * Bad mac address. At least one bios sets the mac address
5926                 * to 01:23:45:67:89:ab
5927                 */
5928                dev_err(&pci_dev->dev,
5929                        "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5930                        dev->dev_addr);
5931                eth_hw_addr_random(dev);
5932                dev_err(&pci_dev->dev,
5933                        "Using random MAC address: %pM\n", dev->dev_addr);
5934        }
5935
5936        /* set mac address */
5937        nv_copy_mac_to_hw(dev);
5938
5939        /* disable WOL */
5940        writel(0, base + NvRegWakeUpFlags);
5941        np->wolenabled = 0;
5942        device_set_wakeup_enable(&pci_dev->dev, false);
5943
5944        if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5945
5946                /* take phy and nic out of low power mode */
5947                powerstate = readl(base + NvRegPowerState2);
5948                powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5949                if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5950                    pci_dev->revision >= 0xA3)
5951                        powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5952                writel(powerstate, base + NvRegPowerState2);
5953        }
5954
5955        if (np->desc_ver == DESC_VER_1)
5956                np->tx_flags = NV_TX_VALID;
5957        else
5958                np->tx_flags = NV_TX2_VALID;
5959
5960        np->msi_flags = 0;
5961        if ((id->driver_data & DEV_HAS_MSI) && msi)
5962                np->msi_flags |= NV_MSI_CAPABLE;
5963
5964        if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5965                /* MSI-X has had reported issues when the irqmask is
5966                   modified, as in the case of napi; disable it for now.
5967                */
5968#if 0
5969                np->msi_flags |= NV_MSI_X_CAPABLE;
5970#endif
5971        }
5972
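            /* Select the interrupt moderation scheme: CPU mode paces rx/tx
             * work with the timer interrupt, throughput mode interrupts per
             * packet, and dynamic mode starts in throughput mode and toggles
             * at runtime (not usable on hardware that needs the timer irq).
             */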
5973        if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5974                np->irqmask = NVREG_IRQMASK_CPU;
5975                if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5976                        np->msi_flags |= 0x0001;
5977        } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5978                   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5979                /* start off in throughput mode */
5980                np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5981                /* remove support for msix mode */
5982                np->msi_flags &= ~NV_MSI_X_CAPABLE;
5983        } else {
5984                optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5985                np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5986                if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5987                        np->msi_flags |= 0x0003;
5988        }
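            /* Note: the low bits of msi_flags encode how many MSI-X vectors
             * will be requested: one shared vector in CPU mode; three
             * vectors (rx, tx, other) in throughput mode.
             */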
5989
5990        if (id->driver_data & DEV_NEED_TIMERIRQ)
5991                np->irqmask |= NVREG_IRQ_TIMER;
5992        if (id->driver_data & DEV_NEED_LINKTIMER) {
5993                np->need_linktimer = 1;
5994                np->link_timeout = jiffies + LINK_TIMEOUT;
5995        } else {
5996                np->need_linktimer = 0;
5997        }
5998
5999        /* Limit the number of outstanding tx packets to work around a hw bug */
6000        if (id->driver_data & DEV_NEED_TX_LIMIT) {
6001                np->tx_limit = 1;
6002                if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
6003                    pci_dev->revision >= 0xA2)
6004                        np->tx_limit = 0;
6005        }
6006
6007        /* clear phy state and temporarily halt phy interrupts */
6008        writel(0, base + NvRegMIIMask);
6009        phystate = readl(base + NvRegAdapterControl);
6010        if (phystate & NVREG_ADAPTCTL_RUNNING) {
6011                phystate_orig = 1;
6012                phystate &= ~NVREG_ADAPTCTL_RUNNING;
6013                writel(phystate, base + NvRegAdapterControl);
6014        }
6015        writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
6016
6017        if (id->driver_data & DEV_HAS_MGMT_UNIT) {
6018                /* management unit running on the mac? */
6019                if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
6020                    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
6021                    nv_mgmt_acquire_sema(dev) &&
6022                    nv_mgmt_get_version(dev)) {
6023                        np->mac_in_use = 1;
6024                        if (np->mgmt_version > 0)
6025                                np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
6026                        /* has the management unit already set up the phy? */
6027                        if (np->mac_in_use &&
6028                            ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
6029                             NVREG_XMITCTL_SYNC_PHY_INIT)) {
6030                                /* phy is inited by mgmt unit */
6031                                phyinitialized = 1;
6032                        } else {
6033                                /* we need to init the phy */
6034                        }
6035                }
6036        }
6037
6038        /* find a suitable phy */
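            /* Scan addresses 1..31 and finally 0 (i == 32 wraps to 0 via
             * the & 0x1F mask); a negative or 0xffff id read means there is
             * no phy at that address.
             */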
6039        for (i = 1; i <= 32; i++) {
6040                int id1, id2;
6041                int phyaddr = i & 0x1F;
6042
6043                spin_lock_irq(&np->lock);
6044                id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
6045                spin_unlock_irq(&np->lock);
6046                if (id1 < 0 || id1 == 0xffff)
6047                        continue;
6048                spin_lock_irq(&np->lock);
6049                id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
6050                spin_unlock_irq(&np->lock);
6051                if (id2 < 0 || id2 == 0xffff)
6052                        continue;
6053
6054                np->phy_model = id2 & PHYID2_MODEL_MASK;
6055                id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
6056                id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
6057                np->phyaddr = phyaddr;
6058                np->phy_oui = id1 | id2;
6059
6060                /* Realtek hardcoded phy id1 to all zeros on certain phys */
6061                if (np->phy_oui == PHY_OUI_REALTEK2)
6062                        np->phy_oui = PHY_OUI_REALTEK;
6063                /* Setup phy revision for Realtek */
6064                if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
6065                        np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
6066
6067                break;
6068        }
6069        if (i == 33) {
6070                dev_info(&pci_dev->dev, "probe: Could not find a valid PHY\n");
6071                goto out_error;
6072        }
6073
6074        if (!phyinitialized) {
6075                /* reset it */
6076                phy_init(dev);
6077        } else {
6078                /* see if it is a gigabit phy */
6079                u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6080                if (mii_status & PHY_GIGABIT)
6081                        np->gigabit = PHY_GIGABIT;
6082        }
6083
6084        /* set default link speed settings */
6085        np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
6086        np->duplex = 0;
6087        np->autoneg = 1;
6088
6089        err = register_netdev(dev);
6090        if (err) {
6091                dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
6092                goto out_error;
6093        }
6094
6095        netif_carrier_off(dev);
6096
6097        /* Some NICs freeze when TX pause is enabled while NIC is
6098         * down, and this stays across warm reboots. The sequence
6099         * below should be enough to recover from that state.
6100         */
6101        nv_update_pause(dev, 0);
6102        nv_start_tx(dev);
6103        nv_stop_tx(dev);
6104
6105        if (id->driver_data & DEV_HAS_VLAN)
6106                nv_vlan_mode(dev, dev->features);
6107
6108        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
6109                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6110
6111        dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6112                 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6113                 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6114                        "csum " : "",
6115                 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6116                                  NETIF_F_HW_VLAN_CTAG_TX) ?
6117                        "vlan " : "",
6118                 dev->features & (NETIF_F_LOOPBACK) ?
6119                        "loopback " : "",
6120                 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6121                 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6122                 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6123                 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6124                 np->need_linktimer ? "lnktim " : "",
6125                 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6126                 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6127                 np->desc_ver);
6128
6129        return 0;
6130
6131out_error:
6132        if (phystate_orig)
6133                writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6134out_freering:
6135        free_rings(dev);
6136out_unmap:
6137        iounmap(get_hwbase(dev));
6138out_relreg:
6139        pci_release_regions(pci_dev);
6140out_disable:
6141        pci_disable_device(pci_dev);
6142out_free:
6143        free_percpu(np->txrx_stats);
6144out_alloc_percpu:
6145        free_netdev(dev);
6146out:
6147        return err;
6148}
6149
6150static void nv_restore_phy(struct net_device *dev)
6151{
6152        struct fe_priv *np = netdev_priv(dev);
6153        u16 phy_reserved, mii_control;
6154
6155        if (np->phy_oui == PHY_OUI_REALTEK &&
6156            np->phy_model == PHY_MODEL_REALTEK_8201 &&
6157            phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6158                mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6159                phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6160                phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6161                phy_reserved |= PHY_REALTEK_INIT8;
6162                mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6163                mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6164
6165                /* restart auto negotiation */
6166                mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6167                mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6168                mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6169        }
6170}
6171
6172static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6173{
6174        struct net_device *dev = pci_get_drvdata(pci_dev);
6175        struct fe_priv *np = netdev_priv(dev);
6176        u8 __iomem *base = get_hwbase(dev);
6177
6178        /* special op: write back the misordered MAC address - otherwise
6179         * the next nv_probe would see a wrong address.
6180         */
6181        writel(np->orig_mac[0], base + NvRegMacAddrA);
6182        writel(np->orig_mac[1], base + NvRegMacAddrB);
6183        writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6184               base + NvRegTransmitPoll);
6185}
6186
6187static void nv_remove(struct pci_dev *pci_dev)
6188{
6189        struct net_device *dev = pci_get_drvdata(pci_dev);
6190        struct fe_priv *np = netdev_priv(dev);
6191
6192        free_percpu(np->txrx_stats);
6193
6194        unregister_netdev(dev);
6195
6196        nv_restore_mac_addr(pci_dev);
6197
6198        /* restore any phy related changes */
6199        nv_restore_phy(dev);
6200
6201        nv_mgmt_release_sema(dev);
6202
6203        /* free all structures */
6204        free_rings(dev);
6205        iounmap(get_hwbase(dev));
6206        pci_release_regions(pci_dev);
6207        pci_disable_device(pci_dev);
6208        free_netdev(dev);
6209}
6210
6211#ifdef CONFIG_PM_SLEEP
6212static int nv_suspend(struct device *device)
6213{
6214        struct net_device *dev = dev_get_drvdata(device);
6215        struct fe_priv *np = netdev_priv(dev);
6216        u8 __iomem *base = get_hwbase(dev);
6217        int i;
6218
6219        if (netif_running(dev)) {
6220                /* Gross. */
6221                nv_close(dev);
6222        }
6223        netif_device_detach(dev);
6224
6225        /* save non-pci configuration space */
6226        for (i = 0; i <= np->register_size/sizeof(u32); i++)
6227                np->saved_config_space[i] = readl(base + i*sizeof(u32));
6228
6229        return 0;
6230}
6231
6232static int nv_resume(struct device *device)
6233{
6234        struct pci_dev *pdev = to_pci_dev(device);
6235        struct net_device *dev = pci_get_drvdata(pdev);
6236        struct fe_priv *np = netdev_priv(dev);
6237        u8 __iomem *base = get_hwbase(dev);
6238        int i, rc = 0;
6239
6240        /* restore non-pci configuration space */
6241        for (i = 0; i <= np->register_size/sizeof(u32); i++)
6242                writel(np->saved_config_space[i], base+i*sizeof(u32));
6243
6244        if (np->driver_data & DEV_NEED_MSI_FIX)
6245                pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6246
6247        /* restore phy state, including autoneg */
6248        phy_init(dev);
6249
6250        netif_device_attach(dev);
6251        if (netif_running(dev)) {
6252                rc = nv_open(dev);
6253                nv_set_multicast(dev);
6254        }
6255        return rc;
6256}
6257
6258static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6259#define NV_PM_OPS (&nv_pm_ops)
6260
6261#else
6262#define NV_PM_OPS NULL
6263#endif /* CONFIG_PM_SLEEP */
6264
6265#ifdef CONFIG_PM
6266static void nv_shutdown(struct pci_dev *pdev)
6267{
6268        struct net_device *dev = pci_get_drvdata(pdev);
6269        struct fe_priv *np = netdev_priv(dev);
6270
6271        if (netif_running(dev))
6272                nv_close(dev);
6273
6274        /*
6275         * Restore the MAC so a kernel started by kexec won't get confused.
6276         * If we really go for poweroff, we must not restore the MAC,
6277         * otherwise the MAC for WOL will be reversed at least on some boards.
6278         */
6279        if (system_state != SYSTEM_POWER_OFF)
6280                nv_restore_mac_addr(pdev);
6281
6282        pci_disable_device(pdev);
6283        /*
6284         * Apparently it is not possible to reinitialise from D3 hot,
6285         * only put the device into D3 if we really go for poweroff.
6286         */
6287        if (system_state == SYSTEM_POWER_OFF) {
6288                pci_wake_from_d3(pdev, np->wolenabled);
6289                pci_set_power_state(pdev, PCI_D3hot);
6290        }
6291}
6292#else
6293#define nv_shutdown NULL
6294#endif /* CONFIG_PM */
6295
6296static const struct pci_device_id pci_tbl[] = {
6297        {       /* nForce Ethernet Controller */
6298                PCI_DEVICE(0x10DE, 0x01C3),
6299                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6300        },
6301        {       /* nForce2 Ethernet Controller */
6302                PCI_DEVICE(0x10DE, 0x0066),
6303                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6304        },
6305        {       /* nForce3 Ethernet Controller */
6306                PCI_DEVICE(0x10DE, 0x00D6),
6307                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6308        },
6309        {       /* nForce3 Ethernet Controller */
6310                PCI_DEVICE(0x10DE, 0x0086),
6311                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6312        },
6313        {       /* nForce3 Ethernet Controller */
6314                PCI_DEVICE(0x10DE, 0x008C),
6315                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6316        },
6317        {       /* nForce3 Ethernet Controller */
6318                PCI_DEVICE(0x10DE, 0x00E6),
6319                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6320        },
6321        {       /* nForce3 Ethernet Controller */
6322                PCI_DEVICE(0x10DE, 0x00DF),
6323                .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6324        },
6325        {       /* CK804 Ethernet Controller */
6326                PCI_DEVICE(0x10DE, 0x0056),
6327                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6328        },
6329        {       /* CK804 Ethernet Controller */
6330                PCI_DEVICE(0x10DE, 0x0057),
6331                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6332        },
6333        {       /* MCP04 Ethernet Controller */
6334                PCI_DEVICE(0x10DE, 0x0037),
6335                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6336        },
6337        {       /* MCP04 Ethernet Controller */
6338                PCI_DEVICE(0x10DE, 0x0038),
6339                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6340        },
6341        {       /* MCP51 Ethernet Controller */
6342                PCI_DEVICE(0x10DE, 0x0268),
6343                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6344        },
6345        {       /* MCP51 Ethernet Controller */
6346                PCI_DEVICE(0x10DE, 0x0269),
6347                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6348        },
6349        {       /* MCP55 Ethernet Controller */
6350                PCI_DEVICE(0x10DE, 0x0372),
6351                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6352        },
6353        {       /* MCP55 Ethernet Controller */
6354                PCI_DEVICE(0x10DE, 0x0373),
6355                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6356        },
6357        {       /* MCP61 Ethernet Controller */
6358                PCI_DEVICE(0x10DE, 0x03E5),
6359                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6360        },
6361        {       /* MCP61 Ethernet Controller */
6362                PCI_DEVICE(0x10DE, 0x03E6),
6363                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6364        },
6365        {       /* MCP61 Ethernet Controller */
6366                PCI_DEVICE(0x10DE, 0x03EE),
6367                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6368        },
6369        {       /* MCP61 Ethernet Controller */
6370                PCI_DEVICE(0x10DE, 0x03EF),
6371                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6372        },
6373        {       /* MCP65 Ethernet Controller */
6374                PCI_DEVICE(0x10DE, 0x0450),
6375                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6376        },
6377        {       /* MCP65 Ethernet Controller */
6378                PCI_DEVICE(0x10DE, 0x0451),
6379                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6380        },
6381        {       /* MCP65 Ethernet Controller */
6382                PCI_DEVICE(0x10DE, 0x0452),
6383                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6384        },
6385        {       /* MCP65 Ethernet Controller */
6386                PCI_DEVICE(0x10DE, 0x0453),
6387                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6388        },
6389        {       /* MCP67 Ethernet Controller */
6390                PCI_DEVICE(0x10DE, 0x054C),
6391                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6392        },
6393        {       /* MCP67 Ethernet Controller */
6394                PCI_DEVICE(0x10DE, 0x054D),
6395                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6396        },
6397        {       /* MCP67 Ethernet Controller */
6398                PCI_DEVICE(0x10DE, 0x054E),
6399                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6400        },
6401        {       /* MCP67 Ethernet Controller */
6402                PCI_DEVICE(0x10DE, 0x054F),
6403                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6404        },
6405        {       /* MCP73 Ethernet Controller */
6406                PCI_DEVICE(0x10DE, 0x07DC),
6407                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6408        },
6409        {       /* MCP73 Ethernet Controller */
6410                PCI_DEVICE(0x10DE, 0x07DD),
6411                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6412        },
6413        {       /* MCP73 Ethernet Controller */
6414                PCI_DEVICE(0x10DE, 0x07DE),
6415                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6416        },
6417        {       /* MCP73 Ethernet Controller */
6418                PCI_DEVICE(0x10DE, 0x07DF),
6419                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6420        },
6421        {       /* MCP77 Ethernet Controller */
6422                PCI_DEVICE(0x10DE, 0x0760),
6423                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6424        },
6425        {       /* MCP77 Ethernet Controller */
6426                PCI_DEVICE(0x10DE, 0x0761),
6427                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6428        },
6429        {       /* MCP77 Ethernet Controller */
6430                PCI_DEVICE(0x10DE, 0x0762),
6431                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6432        },
6433        {       /* MCP77 Ethernet Controller */
6434                PCI_DEVICE(0x10DE, 0x0763),
6435                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6436        },
6437        {       /* MCP79 Ethernet Controller */
6438                PCI_DEVICE(0x10DE, 0x0AB0),
6439                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6440        },
6441        {       /* MCP79 Ethernet Controller */
6442                PCI_DEVICE(0x10DE, 0x0AB1),
6443                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6444        },
6445        {       /* MCP79 Ethernet Controller */
6446                PCI_DEVICE(0x10DE, 0x0AB2),
6447                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6448        },
6449        {       /* MCP79 Ethernet Controller */
6450                PCI_DEVICE(0x10DE, 0x0AB3),
6451                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6452        },
6453        {       /* MCP89 Ethernet Controller */
6454                PCI_DEVICE(0x10DE, 0x0D7D),
6455                .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6456        },
6457        {0,},
6458};
6459
6460static struct pci_driver forcedeth_pci_driver = {
6461        .name           = DRV_NAME,
6462        .id_table       = pci_tbl,
6463        .probe          = nv_probe,
6464        .remove         = nv_remove,
6465        .shutdown       = nv_shutdown,
6466        .driver.pm      = NV_PM_OPS,
6467};
6468
6469module_param(max_interrupt_work, int, 0);
6470MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6471module_param(optimization_mode, int, 0);
6472MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6473module_param(poll_interval, int, 0);
6474MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6475module_param(msi, int, 0);
6476MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6477module_param(msix, int, 0);
6478MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6479module_param(dma_64bit, int, 0);
6480MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6481module_param(phy_cross, int, 0);
6482MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6483module_param(phy_power_down, int, 0);
6484MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6485module_param(debug_tx_timeout, bool, 0);
6486MODULE_PARM_DESC(debug_tx_timeout,
6487                 "Dump tx related registers and ring when tx_timeout happens");
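    /* Example module load (hypothetical values):
     *   modprobe forcedeth optimization_mode=2 msi=0 phy_power_down=1
     * selects dynamic interrupt moderation, disables MSI and powers the
     * phy down whenever the interface is down.
     */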
6488
6489module_pci_driver(forcedeth_pci_driver);
6490MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6491MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6492MODULE_LICENSE("GPL");
6493MODULE_DEVICE_TABLE(pci, pci_tbl);
6494