linux/drivers/net/ethernet/adaptec/starfire.c
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "starfire"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "July  6, 2008"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
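
/*
 * Illustrative sketch (not part of the original driver): with
 * PADDING_MASK = 3, a transmit length is rounded up to the next
 * 32-bit boundary, which is exactly what start_tx() below does via
 * skb_padto().  The helper is hypothetical and compiled out.
 */
#if 0
static inline unsigned int starfire_padded_len(unsigned int len)
{
        /* e.g. lengths 61, 62, 63 and 64 all round up to 64 */
        return (len + PADDING_MASK) & ~PADDING_MASK;
}
#endif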

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2-byte boundary. Thus we always force copying of packets, as
 * the Starfire doesn't allow misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
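
/*
 * Illustrative sketch (not part of the original driver) of how
 * rx_copybreak is meant to be consumed in the receive path: frames
 * shorter than the breakpoint are copied into a fresh skb, offset by
 * two bytes to align the IP header; larger frames are passed up in
 * the ring buffer itself.  Hypothetical code, assuming pkt_len and
 * the ring skb are in scope; the real logic lives in __netdev_rx().
 */
#if 0
        if (pkt_len < rx_copybreak) {
                struct sk_buff *copy = netdev_alloc_skb(dev, pkt_len + 2);

                skb_reserve(copy, 2);   /* 16-byte align the IP header */
                /* ...copy pkt_len bytes from the ring buffer... */
        }
#endif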

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256
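
/*
 * Note: netdev_open() below rounds each sub-queue up to QUEUE_ALIGN
 * before carving them all out of one allocation.  Worked example: the
 * Tx completion queue is 1024 entries of 4 bytes = 4096 bytes, already
 * a multiple of 256, so ((4096 + 255) / 256) * 256 == 4096.
 */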

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
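
/*
 * Illustration (not part of the original driver): the cpu_to_dma()/
 * dma_to_cpu() wrappers above keep descriptor accesses independent of
 * the dma_addr_t width.  For instance, init_ring() below stores a
 * mapped Rx buffer as
 *
 *      np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 *
 * and the same value would be read back with dma_to_cpu().
 */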

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names */
#define FIRMWARE_RX     "adaptec/starfire_rx.bin"
#define FIRMWARE_TX     "adaptec/starfire_tx.bin"

/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.
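
As an example, start_tx() below sets the wrap bit on the first descriptor
of a frame that would otherwise run past the end of the ring:

        if (entry >= TX_RING_SIZE - skb_num_frags(skb))
                status |= TxRingWrap;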

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.
A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alpha and IA64.  For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally.  Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.
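
In code terms (see start_tx() and intr_handler() below), the stop and wake
conditions are:

        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        if (netif_queue_stopped(dev) &&
            (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE))
                netif_wake_queue(dev);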

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static const struct pci_device_id starfire_pci_tbl[] = {
        { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 vlanid;
        __le16 status2;
};
struct csum_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 csum;                    /* Partial checksum */
        __le16 status2;
};
struct full_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 status3;
        __le16 status2;
        __le16 vlanid;
        __le16 csum;                    /* partial checksum */
        __le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 reserved;
        __le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        __le32 status;                  /* timestamp, index. */
#if 0
        __le32 intrstatus;              /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct napi_struct napi;
        struct net_device *dev;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* MII device addresses. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev);
static void     init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      __netdev_rx(struct net_device *dev, int *quota);
static int      netdev_poll(struct napi_struct *napi, int budget);
static void     refill_rx_ring(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev,
                                  __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}

static int netdev_vlan_rx_kill_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}
#endif /* VLAN_SUPPORT */


static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef VLAN_SUPPORT
        .ndo_vlan_rx_add_vid    = netdev_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = netdev_vlan_rx_kill_vid,
#endif
};

static int starfire_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        struct device *d = &pdev->dev;
        struct netdev_private *np;
        int i, irq, chip_idx = ent->driver_data;
        struct net_device *dev;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                dev_err(d, "no PCI MEM resources, aborting\n");
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, DRV_NAME)) {
                dev_err(d, "cannot reserve PCI resources, aborting\n");
                goto err_out_free_netdev;
        }

        base = ioremap(ioaddr, io_size);
        if (!base) {
                dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
                        io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;
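        /*
         * Worked example: with Timer10X set, one timer tick is
         * 128 * 0.8us = 102.4us, and (intr_latency * 10) / 1024 is
         * intr_latency / 102.4 in integer arithmetic; intr_latency=1000
         * (microseconds) yields 9 ticks, i.e. ~921.6us of coalescing.
         */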

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk("Adjusting small_frames down to 512\n");
                        break;
                }
        }

        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &ethtool_ops;

        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
               dev->name, netdrv_tbl[chip_idx].name, base,
               dev->dev_addr, irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        msleep(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "%#4.4x advertising %#4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        iounmap(base);
err_out_free_res:
        pci_release_regions (pdev);
err_out_free_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
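        /*
         * Each PHY is exposed as a 128-byte window of 32 four-byte
         * registers starting at MIICtrl, hence the (phy_id<<7) +
         * (location<<2) arithmetic above.
         */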
        int result, boguscnt=1000;
        /* ??? Should we add a busy-wait here? */
        do {
                result = readl(mdio_addr);
        } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        const struct firmware *fw_rx, *fw_tx;
        const __be32 *fw_rx_data, *fw_tx_data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        const int irq = np->pci_dev->irq;
        int i, retval;
        size_t tx_size, rx_size;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, irq);

        /* Allocate the various queues. */
        if (!np->queue_mem) {
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
                        free_irq(irq, dev);
                        return -ENOMEM;
                }

                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }
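
        /*
         * The single coherent allocation above is carved up in order:
         * Tx completion queue, Rx completion queue, Tx ring, Rx ring.
         * Every size except the final Rx ring is rounded up to a
         * multiple of QUEUE_ALIGN, so each boundary stays 256-byte
         * aligned.
         */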

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
        for (i = 1; i < 16; i++) {
                __be16 *eaddrs = (__be16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
        }
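
        /*
         * Note: each perfect-filter entry above is written as three
         * 16-bit words in reversed order (eaddrs[2], [1], [0]),
         * presumably matching the order in which the hardware compares
         * the station address.
         */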
 977
 978        /* Initialize other registers. */
 979        /* Configure the PCI bus bursts and FIFO thresholds. */
 980        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
 981        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
 982        udelay(1000);
 983        writel(np->tx_mode, ioaddr + TxMode);
 984        np->tx_threshold = 4;
 985        writel(np->tx_threshold, ioaddr + TxThreshold);
 986
 987        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
 988
 989        napi_enable(&np->napi);
 990
 991        netif_start_queue(dev);
 992
 993        if (debug > 1)
 994                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
 995        set_rx_mode(dev);
 996
 997        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
 998        check_duplex(dev);
 999
1000        /* Enable GPIO interrupts on link change */
1001        writel(0x0f00ff00, ioaddr + GPIOCtrl);
1002
1003        /* Set the interrupt mask */
1004        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1005               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1006               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1007               ioaddr + IntrEnable);
1008        /* Enable PCI interrupts. */
1009        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1010               ioaddr + PCIDeviceConfig);
1011
1012#ifdef VLAN_SUPPORT
1013        /* Set VLAN type to 802.1q */
1014        writel(ETH_P_8021Q, ioaddr + VlanType);
1015#endif /* VLAN_SUPPORT */
1016
1017        retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1018        if (retval) {
1019                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1020                       FIRMWARE_RX);
1021                goto out_init;
1022        }
1023        if (fw_rx->size % 4) {
1024                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1025                       fw_rx->size, FIRMWARE_RX);
1026                retval = -EINVAL;
1027                goto out_rx;
1028        }
1029        retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1030        if (retval) {
1031                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1032                       FIRMWARE_TX);
1033                goto out_rx;
1034        }
1035        if (fw_tx->size % 4) {
1036                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1037                       fw_tx->size, FIRMWARE_TX);
1038                retval = -EINVAL;
1039                goto out_tx;
1040        }
1041        fw_rx_data = (const __be32 *)&fw_rx->data[0];
1042        fw_tx_data = (const __be32 *)&fw_tx->data[0];
1043        rx_size = fw_rx->size / 4;
1044        tx_size = fw_tx->size / 4;
1045
1046        /* Load Rx/Tx firmware into the frame processors */
1047        for (i = 0; i < rx_size; i++)
1048                writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1049        for (i = 0; i < tx_size; i++)
1050                writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1051        if (enable_hw_cksum)
1052                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1053                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1054        else
1055                /* Enable the Rx and Tx units only. */
1056                writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1057
1058        if (debug > 1)
1059                printk(KERN_DEBUG "%s: Done netdev_open().\n",
1060                       dev->name);
1061
1062out_tx:
1063        release_firmware(fw_tx);
1064out_rx:
1065        release_firmware(fw_rx);
1066out_init:
1067        if (retval)
1068                netdev_close(dev);
1069        return retval;
1070}
1071
1072
1073static void check_duplex(struct net_device *dev)
1074{
1075        struct netdev_private *np = netdev_priv(dev);
1076        u16 reg0;
1077        int silly_count = 1000;
1078
1079        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1080        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1081        udelay(500);
1082        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1083                /* do nothing */;
1084        if (!silly_count) {
1085                printk("%s: MII reset failed!\n", dev->name);
1086                return;
1087        }
1088
1089        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1090
1091        if (!np->mii_if.force_media) {
1092                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1093        } else {
1094                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1095                if (np->speed100)
1096                        reg0 |= BMCR_SPEED100;
1097                if (np->mii_if.full_duplex)
1098                        reg0 |= BMCR_FULLDPLX;
1099                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1100                       dev->name,
1101                       np->speed100 ? "100" : "10",
1102                       np->mii_if.full_duplex ? "full" : "half");
1103        }
1104        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1105}
1106
1107
1108static void tx_timeout(struct net_device *dev)
1109{
1110        struct netdev_private *np = netdev_priv(dev);
1111        void __iomem *ioaddr = np->base;
1112        int old_debug;
1113
1114        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1115               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1116
1117        /* Perhaps we should reinitialize the hardware here. */
1118
1119        /*
1120         * Stop and restart the interface.
1121         * Cheat and increase the debug level temporarily.
1122         */
1123        old_debug = debug;
1124        debug = 2;
1125        netdev_close(dev);
1126        netdev_open(dev);
1127        debug = old_debug;
1128
1129        /* Trigger an immediate transmit demand. */
1130
1131        netif_trans_update(dev); /* prevent tx timeout */
1132        dev->stats.tx_errors++;
1133        netif_wake_queue(dev);
1134}
1135
1136
1137/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1138static void init_ring(struct net_device *dev)
1139{
1140        struct netdev_private *np = netdev_priv(dev);
1141        int i;
1142
1143        np->cur_rx = np->cur_tx = np->reap_tx = 0;
1144        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1145
1146        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1147
1148        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1149        for (i = 0; i < RX_RING_SIZE; i++) {
1150                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1151                np->rx_info[i].skb = skb;
1152                if (skb == NULL)
1153                        break;
1154                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155                if (pci_dma_mapping_error(np->pci_dev,
1156                                          np->rx_info[i].mapping)) {
1157                        dev_kfree_skb(skb);
1158                        np->rx_info[i].skb = NULL;
1159                        break;
1160                }
1161                /* Grrr, we cannot offset to correctly align the IP header. */
1162                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1163        }
1164        writew(i - 1, np->base + RxDescQIdx);
1165        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1166
1167        /* Clear the remainder of the Rx buffer ring. */
1168        for (  ; i < RX_RING_SIZE; i++) {
1169                np->rx_ring[i].rxaddr = 0;
1170                np->rx_info[i].skb = NULL;
1171                np->rx_info[i].mapping = 0;
1172        }
1173        /* Mark the last entry as wrapping the ring. */
1174        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1175
1176        /* Clear the completion rings. */
1177        for (i = 0; i < DONE_Q_SIZE; i++) {
1178                np->rx_done_q[i].status = 0;
1179                np->tx_done_q[i].status = 0;
1180        }
1181
1182        for (i = 0; i < TX_RING_SIZE; i++)
1183                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1184}
1185
1186
1187static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1188{
1189        struct netdev_private *np = netdev_priv(dev);
1190        unsigned int entry;
1191        unsigned int prev_tx;
1192        u32 status;
1193        int i, j;
1194
1195        /*
1196         * be cautious here, wrapping the queue has weird semantics
1197         * and we may not have enough slots even when it seems we do.
1198         */
1199        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1200                netif_stop_queue(dev);
1201                return NETDEV_TX_BUSY;
1202        }
1203
1204#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1205        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1206                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1207                        return NETDEV_TX_OK;
1208        }
1209#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1210
1211        prev_tx = np->cur_tx;
1212        entry = np->cur_tx % TX_RING_SIZE;
1213        for (i = 0; i < skb_num_frags(skb); i++) {
1214                int wrap_ring = 0;
1215                status = TxDescID;
1216
1217                if (i == 0) {
1218                        np->tx_info[entry].skb = skb;
1219                        status |= TxCRCEn;
1220                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1221                                status |= TxRingWrap;
1222                                wrap_ring = 1;
1223                        }
1224                        if (np->reap_tx) {
1225                                status |= TxDescIntr;
1226                                np->reap_tx = 0;
1227                        }
1228                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1229                                status |= TxCalTCP;
1230                                dev->stats.tx_compressed++;
1231                        }
1232                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1233
1234                        np->tx_info[entry].mapping =
1235                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1236                } else {
1237                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1238                        status |= skb_frag_size(this_frag);
1239                        np->tx_info[entry].mapping =
1240                                pci_map_single(np->pci_dev,
1241                                               skb_frag_address(this_frag),
1242                                               skb_frag_size(this_frag),
1243                                               PCI_DMA_TODEVICE);
1244                }
1245                if (pci_dma_mapping_error(np->pci_dev,
1246                                          np->tx_info[entry].mapping)) {
1247                        dev->stats.tx_dropped++;
1248                        goto err_out;
1249                }
1250
1251                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1252                np->tx_ring[entry].status = cpu_to_le32(status);
1253                if (debug > 3)
1254                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1255                               dev->name, np->cur_tx, np->dirty_tx,
1256                               entry, status);
1257                if (wrap_ring) {
1258                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1259                        np->cur_tx += np->tx_info[entry].used_slots;
1260                        entry = 0;
1261                } else {
1262                        np->tx_info[entry].used_slots = 1;
1263                        np->cur_tx += np->tx_info[entry].used_slots;
1264                        entry++;
1265                }
1266                /* scavenge the tx descriptors twice per TX_RING_SIZE */
1267                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1268                        np->reap_tx = 1;
1269        }
1270
1271        /* Non-x86: explicitly flush descriptor cache lines here. */
1272        /* Ensure all descriptors are written back before the transmit is
1273           initiated. - Jes */
1274        wmb();
1275
1276        /* Update the producer index. */
1277        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1278
1279        /* 4 is arbitrary, but should be ok */
1280        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1281                netif_stop_queue(dev);
1282
1283        return NETDEV_TX_OK;
1284
1285err_out:
1286        entry = prev_tx % TX_RING_SIZE;
1287        np->tx_info[entry].skb = NULL;
1288        if (i > 0) {
1289                pci_unmap_single(np->pci_dev,
1290                                 np->tx_info[entry].mapping,
1291                                 skb_first_frag_len(skb),
1292                                 PCI_DMA_TODEVICE);
1293                np->tx_info[entry].mapping = 0;
1294                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1295                for (j = 1; j < i; j++) {
1296                        pci_unmap_single(np->pci_dev,
1297                                         np->tx_info[entry].mapping,
1298                                         skb_frag_size(
1299                                                &skb_shinfo(skb)->frags[j-1]),
1300                                         PCI_DMA_TODEVICE);
1301                        entry++;
1302                }
1303        }
1304        dev_kfree_skb_any(skb);
1305        np->cur_tx = prev_tx;
1306        return NETDEV_TX_OK;
1307}
1308
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
			u32 enable;

			if (likely(napi_schedule_prep(&np->napi))) {
				__napi_schedule(&np->napi);
				enable = readl(ioaddr + IntrEnable);
				enable &= ~(IntrRxDone | IntrRxEmpty);
				writel(enable, ioaddr + IntrEnable);
				/* flush PCI posting buffers */
				readl(ioaddr + IntrEnable);
			} else {
				/* Paranoia check */
				enable = readl(ioaddr + IntrEnable);
				if (enable & (IntrRxDone | IntrRxEmpty)) {
					printk(KERN_INFO
					       "%s: interrupt while in poll!\n",
					       dev->name);
					enable &= ~(IntrRxDone | IntrRxEmpty);
					writel(enable, ioaddr + IntrEnable);
				}
			}
		}

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				dev->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				pci_unmap_single(np->pci_dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 PCI_DMA_TODEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						pci_unmap_single(np->pci_dev,
								 np->tx_info[entry].mapping,
								 skb_frag_size(&skb_shinfo(skb)->frags[i]),
								 PCI_DMA_TODEVICE);
						np->dirty_tx++;
						entry++;
					}
				}

				dev_kfree_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}


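/*
 * Note on the masking above: while a NAPI poll is scheduled, IntrRxDone
 * and IntrRxEmpty stay cleared in IntrEnable, and netdev_poll() re-enables
 * them once the ring is drained. Reading IntrEnable back after writing it
 * forces the posted PCI write out before the handler relies on it.
 */
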
/*
 * This routine is logically part of the interrupt/poll handler, but separated
 * for clarity and better register allocation.
 */
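/*
 * Rx completion status word layout, as decoded below: bits 15:0 carry the
 * packet length, bits 26:16 the Rx ring entry that produced the frame, and
 * the flag bits include RxOK and RxFIFOErr.
 */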
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			dev->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				dev->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {	/* out of rx quota */
			retcode = 1;
			goto out;
		}
		(*quota)--;

		pkt_len = desc_status;	/* implicitly truncate to 16 bits */
		entry = (desc_status >> 16) & 0x7ff;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
		if (pkt_len < rx_copybreak &&
		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);	/* 16 byte align the IP header */
			pci_dma_sync_single_for_cpu(np->pci_dev,
						    np->rx_info[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			pci_dma_sync_single_for_device(np->pci_dev,
						       np->rx_info[entry].mapping,
						       pkt_len, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version			/* Remove after testing. */
		/* You will want this info for the initial debug. */
		if (debug > 5) {
			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
			       skb->data, skb->data + 6,
			       skb->data[12], skb->data[13]);
		}
#endif

		skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
#ifdef VLAN_SUPPORT
		if (le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

			if (debug > 4) {
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
				       vlid);
			}
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
		}
#endif /* VLAN_SUPPORT */
		netif_receive_skb(skb);
		dev->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}

	if (*quota == 0) {	/* out of rx quota */
		retcode = 1;
		goto out;
	}
	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}

static int netdev_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	u32 intr_status;
	void __iomem *ioaddr = np->base;
	int quota = budget;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		if (__netdev_rx(dev, &quota))
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	napi_complete(napi);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
		       budget - quota);

	/* Restart Rx engine if stopped. */
	return budget - quota;
}

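/*
 * NAPI contract, for reference: returning less than budget means the poll
 * called napi_complete() and re-enabled its interrupt sources (the path
 * above the out label); returning the full budget leaves the poll
 * scheduled and the Rx interrupts masked.
 */
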
static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						np->rx_info[entry].mapping)) {
				dev_kfree_skb(skb);
				np->rx_info[entry].skb = NULL;
				break;
			}
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}
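
/*
 * RxDescValid (and RxDescEndRing) are OR-ed directly into the DMA address
 * above, so the low descriptor bits double as flags; this appears to rely
 * on the buffer mappings being aligned enough that those bits are
 * otherwise zero.
 */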


static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* reset status first */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}


static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}
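
/*
 * np->tx_threshold is programmed in what appear to be 16-byte units: the
 * message above multiplies by 16 for display, and the cap of
 * PKT_BUF_SZ / 16 corresponds to one full packet buffer.
 */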


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	/* This adapter architecture needs no SMP locks. */
	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip only reports frames it silently dropped. */
	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}

#ifdef VLAN_SUPPORT
static u32 set_vlan_mode(struct netdev_private *np)
{
	u32 ret = VlanMode;
	u16 vid;
	void __iomem *filter_addr = np->base + HashTable + 8;
	int vlan_count = 0;

	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
		if (vlan_count == 32)
			break;
		writew(vid, filter_addr);
		filter_addr += 16;
		vlan_count++;
	}
	if (vlan_count < 32) {
		/* Every active VLAN fit in the 32-entry table: enable
		   perfect VLAN matching and pad the unused slots with
		   VLAN id 0. If the table filled up we cannot tell
		   whether it overflowed, so leave perfect matching off. */
		ret |= PerfectFilterVlan;
		while (vlan_count < 32) {
			writew(0, filter_addr);
			filter_addr += 16;
			vlan_count++;
		}
	}
	return ret;
}
#endif /* VLAN_SUPPORT */

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;

#ifdef VLAN_SUPPORT
	rx_mode |= set_vlan_mode(np);
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		__be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (__be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		__be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (__be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}
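
/*
 * Worked example of the hash indexing used above, assuming only what the
 * code itself shows: ether_crc_le() yields a 32-bit CRC, its top 9 bits
 * select one of 512 filter bits, and mc_filter[] is 32 16-bit words.
 * The function name is illustrative only and the block is not compiled.
 */
#if 0
static void starfire_hash_index_example(const u8 addr[ETH_ALEN])
{
	/* Top 9 CRC bits -> bit index in 0..511. */
	unsigned int bit_nr = ether_crc_le(ETH_ALEN, addr) >> 23;

	/*
	 * (bit_nr >> 4) & ~1 rounds down to the even 16-bit word, i.e.
	 * the 32-bit pair holding the bit: the same selection as 32-bit
	 * word bit_nr >> 5 with bit position bit_nr & 31.
	 */
	unsigned int word16 = (bit_nr >> 4) & ~1;	/* even 16-bit word */
	unsigned int word32 = bit_nr >> 5;		/* == word16 / 2 */
	unsigned int bit    = bit_nr & 31;		/* bit within pair */

	(void)word16; (void)word32; (void)bit;
}
#endif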

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
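
/*
 * The ethtool core calls .begin before each of the operations above, so
 * check_if_running() makes the whole set fail with -EINVAL while the
 * interface is down.
 */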

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */


static void starfire_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	free_netdev(dev);			/* Will also free np!! */
}


static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= starfire_remove_one,
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif /* CONFIG_PM */
	.id_table	= starfire_pci_tbl,
};


static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);

	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

	return pci_register_driver(&starfire_driver);
}


static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);


/*
 * Local variables:
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */
