linux/drivers/net/starfire.c
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "starfire"
#define DRV_VERSION     "2.0"
#define DRV_RELDATE     "June 27, 2006"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

#include "starfire_firmware.h"
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
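
/*
 * For illustration (this mirrors the actual use in start_tx() below):
 * with PADDING_MASK == 3, transmit data is rounded up to the next
 * 32-bit boundary via
 *
 *      skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
 *
 * e.g. a 61-byte frame is padded to 64 bytes, while a 64-byte frame
 * is left untouched.
 */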

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
#undef HAVE_NETDEV_POLL
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4KB or 8KB. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
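
/*
 * Usage sketch for the macros above (illustrative only; the real uses
 * are in init_ring() and start_tx() below): descriptor addresses are
 * stored little-endian at whichever width the build selected, so e.g.
 *
 *      np->rx_ring[i].rxaddr = cpu_to_dma(mapping | RxDescValid);
 *
 * works unchanged whether netdrv_addr_t is __le32 or __le64.
 */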

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

#ifdef HAVE_NETDEV_POLL
#define init_poll(dev, np) \
        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work)
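/*
 * netdev_rx() below masks the Rx interrupt sources and schedules the
 * NAPI poll; netdev_poll() is then expected to re-enable them once the
 * ring has been drained (the usual NAPI pattern).
 */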
#define netdev_rx(dev, np, ioaddr) \
do { \
        u32 intr_enable; \
        if (netif_rx_schedule_prep(dev, &np->napi)) { \
                __netif_rx_schedule(dev, &np->napi); \
                intr_enable = readl(ioaddr + IntrEnable); \
                intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
                writel(intr_enable, ioaddr + IntrEnable); \
                readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
        } else { \
                /* Paranoia check */ \
                intr_enable = readl(ioaddr + IntrEnable); \
                if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
                        printk(KERN_INFO "%s: interrupt while in polling mode!\n", dev->name); \
                        intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
                        writel(intr_enable, ioaddr + IntrEnable); \
                } \
        } \
} while (0)
#define netdev_receive_skb(skb) netif_receive_skb(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
static int      netdev_poll(struct napi_struct *napi, int budget);
#else  /* not HAVE_NETDEV_POLL */
#define init_poll(dev, np)
#define netdev_receive_skb(skb) netif_rx(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
#define netdev_rx(dev, np, ioaddr) \
do { \
        int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
        __netdev_rx(dev, &quota);\
} while (0)
#endif /* not HAVE_NETDEV_POLL */
/* end of compatibility code */


/* These identify the driver base version and may not be removed. */
static const char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.
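
As a rough sketch of that decision (simplified from __netdev_rx() below,
not verbatim driver code):

        if (pkt_len < rx_copybreak) {
                skb = dev_alloc_skb(pkt_len + 2);
                skb_reserve(skb, 2);    // 16-byte align the IP header
                ... copy the frame; the ring buffer stays in place ...
        } else {
                ... unmap the ring skbuff and pass it up the stack;
                    refill_rx_ring() supplies a replacement later ...
        }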

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
        { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] __devinitdata = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 vlanid;
        __le16 status2;
};
struct csum_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 csum;                    /* Partial checksum */
        __le16 status2;
};
struct full_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 status3;
        __le16 status2;
        __le16 vlanid;
        __le16 csum;                    /* partial checksum */
        __le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 reserved;
        __le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        __le32 status;                  /* timestamp, index. */
#if 0
        __le32 intrstatus;              /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct napi_struct napi;
        struct net_device *dev;
        struct net_device_stats stats;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        struct vlan_group *vlgrp;
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* MII device addresses. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev);
static void     init_ring(struct net_device *dev);
static int      start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      __netdev_rx(struct net_device *dev, int *quota);
static void     refill_rx_ring(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 2)
                printk("%s: Setting vlgrp to %p\n", dev->name, grp);
        np->vlgrp = grp;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        vlan_group_set_device(np->vlgrp, vid, NULL);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */


static int __devinit starfire_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct netdev_private *np;
        int i, irq, option, chip_idx = ent->driver_data;
        struct net_device *dev;
        static int card_idx = -1;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;
        DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        card_idx++;

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev) {
                printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
                return -ENOMEM;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, DRV_NAME)) {
                printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
                goto err_out_free_netdev;
        }

        base = ioremap(ioaddr, io_size);
        if (!base) {
                printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
                        card_idx, io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
        dev->vlan_rx_register = netdev_vlan_rx_register;
        dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
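        /* The station address is stored at EEPROM offsets 15..20 in reverse
           byte order: dev_addr[0] comes from offset 20, dev_addr[5] from
           offset 15. */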
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        dev->base_addr = (unsigned long)base;
        dev->irq = irq;

        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        option = card_idx < MAX_UNITS ? options[card_idx] : 0;
        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option & 0x200)
                np->mii_if.full_duplex = 1;

        if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;
        else
                np->mii_if.force_media = 0;
        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
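        /* That is, one timer tick is 102.4us, so a latency given in
           microseconds converts to ticks as intr_latency / 102.4, which is
           what (intr_latency * 10) / 1024 computes in integer arithmetic. */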
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk("Adjusting small_frames down to 512\n");
                        break;
                }
        }

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        dev->tx_timeout = tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        init_poll(dev, np);
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n",
               dev->name, netdrv_tbl[chip_idx].name, base,
               print_mac(mac, dev->dev_addr), irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        mdelay(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "%#4.4x advertising %#4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        iounmap(base);
err_out_free_res:
        pci_release_regions (pdev);
err_out_free_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        int result, boguscnt=1000;
        /* ??? Should we add a busy-wait here? */
        do
                result = readl(mdio_addr);
        while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int i, retval;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, dev->irq);

        /* Allocate the various queues. */
        if (!np->queue_mem) {
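                /* Round each queue up to a multiple of QUEUE_ALIGN (256)
                   so that every region handed to the chip stays 256-byte
                   aligned: the usual (x + align - 1) / align * align idiom. */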
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
                        free_irq(dev->irq, dev);
                        return -ENOMEM;
                }

                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

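        /* Write the high 32 bits of the shared queue region.  The double
           shift (>> 16 >> 16) instead of a single >> 32 avoids an undefined
           shift when dma_addr_t is only 32 bits wide. */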
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
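        /* Fill entries 1..15 with the station address: each 16-byte slot
           takes the three 16-bit words of the MAC, written in reverse
           order. */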
        for (i = 1; i < 16; i++) {
                __be16 *eaddrs = (__be16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);
        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

#ifdef HAVE_NETDEV_POLL
        napi_enable(&np->napi);
#endif
        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
        set_rx_mode(dev);

        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        /* Enable PCI interrupts. */
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
        /* Set VLAN type to 802.1q */
        writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

        /* Load Rx/Tx firmware into the frame processors */
        for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
                writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
                writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
        if (enable_hw_cksum)
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
        else
                /* Enable the Rx and Tx units only. */
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
                       dev->name);

        return 0;
}


static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u16 reg0;
        int silly_count = 1000;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                /* do nothing */;
        if (!silly_count) {
                printk("%s: MII reset failed!\n", dev->name);
                return;
        }

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (!np->mii_if.force_media) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->mii_if.full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int old_debug;

        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

        /* Perhaps we should reinitialize the hardware here. */

        /*
         * Stop and restart the interface.
         * Cheat and increase the debug level temporarily.
         */
        old_debug = debug;
        debug = 2;
        netdev_close(dev);
        netdev_open(dev);
        debug = old_debug;

        /* Trigger an immediate transmit demand. */

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = np->reap_tx = 0;
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb->dev = dev;                 /* Mark as being used by this device. */
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
        writew(i - 1, np->base + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for (  ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++)
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));

        return;
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        u32 status;
        int i;

        /*
         * be cautious here, wrapping the queue has weird semantics
         * and we may not have enough slots even when it seems we do.
         */
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
                netif_stop_queue(dev);
                return 1;
        }

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
                        return NETDEV_TX_OK;
        }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
                status = TxDescID;

                if (i == 0) {
                        np->tx_info[entry].skb = skb;
                        status |= TxCRCEn;
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
                                status |= TxRingWrap;
                                wrap_ring = 1;
                        }
                        if (np->reap_tx) {
                                status |= TxDescIntr;
                                np->reap_tx = 0;
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                status |= TxCalTCP;
                                np->stats.tx_compressed++;
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                } else {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= this_frag->size;
                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* scavenge the tx descriptors twice per TX_RING_SIZE */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
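        /* The hardware index counts 8-byte units, so a 16-byte type 2
           descriptor advances it by two per ring slot. */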
        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

        return 0;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;
        int handled = 0;

        do {
                u32 intr_status = readl(ioaddr + IntrClear);

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
                               dev->name, intr_status);

                if (intr_status == 0 || intr_status == (u32) -1)
                        break;

                handled = 1;

                if (intr_status & (IntrRxDone | IntrRxEmpty))
                        netdev_rx(dev, np, ioaddr);

                /* Scavenge the skbuff list based on the Tx-done queue.
                   There are redundant checks here that may be cleaned up
                   after the driver has proven to be reliable. */
                consumer = readl(ioaddr + TxConsumerIdx);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                               dev->name, consumer);

                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                        if (debug > 3)
                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
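                        /* The top three status bits encode the completion
                           type: 101 is a plain Tx-done indication (count the
                           packet), 100 carries the descriptor index so the
                           skb can be unmapped and freed. */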
1348                        if ((tx_status & 0xe0000000) == 0xa0000000) {
1349                                np->stats.tx_packets++;
1350                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
1351                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1352                                struct sk_buff *skb = np->tx_info[entry].skb;
1353                                np->tx_info[entry].skb = NULL;
1354                                pci_unmap_single(np->pci_dev,
1355                                                 np->tx_info[entry].mapping,
1356                                                 skb_first_frag_len(skb),
1357                                                 PCI_DMA_TODEVICE);
1358                                np->tx_info[entry].mapping = 0;
1359                                np->dirty_tx += np->tx_info[entry].used_slots;
1360                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1361                                {
1362                                        int i;
1363                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1364                                                pci_unmap_single(np->pci_dev,
1365                                                                 np->tx_info[entry].mapping,
1366                                                                 skb_shinfo(skb)->frags[i].size,
1367                                                                 PCI_DMA_TODEVICE);
1368                                                np->dirty_tx++;
1369                                                entry++;
1370                                        }
1371                                }
1372
1373                                dev_kfree_skb_irq(skb);
1374                        }
1375                        np->tx_done_q[np->tx_done].status = 0;
1376                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1377                }
1378                writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
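                    /* CompletionQConsumerIdx apparently holds both consumer
                       indices: Rx in the low 16 bits and Tx in the high 16
                       bits, hence the +2 byte offset on this 16-bit write. */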
1379
1380                if (netif_queue_stopped(dev) &&
1381                    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1382                        /* The ring is no longer full, wake the queue. */
1383                        netif_wake_queue(dev);
1384                }
1385
1386                /* Stats overflow */
1387                if (intr_status & IntrStatsMax)
1388                        get_stats(dev);
1389
1390                /* Media change interrupt. */
1391                if (intr_status & IntrLinkChange)
1392                        netdev_media_change(dev);
1393
1394                /* Abnormal error summary/uncommon events handlers. */
1395                if (intr_status & IntrAbnormalSummary)
1396                        netdev_error(dev, intr_status);
1397
1398                if (--boguscnt < 0) {
1399                        if (debug > 1)
1400                                printk(KERN_WARNING "%s: Too much work at interrupt, "
1401                                       "status=%#8.8x.\n",
1402                                       dev->name, intr_status);
1403                        break;
1404                }
1405        } while (1);
1406
1407        if (debug > 4)
1408                printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1409                       dev->name, (int) readl(ioaddr + IntrStatus));
1410        return IRQ_RETVAL(handled);
1411}
1412
1413
1414/* This routine is logically part of the interrupt/poll handler, but separated
1415   for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
1416static int __netdev_rx(struct net_device *dev, int *quota)
1417{
1418        struct netdev_private *np = netdev_priv(dev);
1419        u32 desc_status;
1420        int retcode = 0;
1421
1422        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1423        while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1424                struct sk_buff *skb;
1425                u16 pkt_len;
1426                int entry;
1427                rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1428
1429                if (debug > 4)
1430                        printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1431                if (!(desc_status & RxOK)) {
1432                        /* There was an error. */
1433                        if (debug > 2)
1434                                printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1435                        np->stats.rx_errors++;
1436                        if (desc_status & RxFIFOErr)
1437                                np->stats.rx_fifo_errors++;
1438                        goto next_rx;
1439                }
1440
1441                if (*quota <= 0) {      /* out of rx quota */
1442                        retcode = 1;
1443                        goto out;
1444                }
1445                (*quota)--;
1446
1447                pkt_len = desc_status;  /* Implicitly truncated to the low 16 bits. */
1448                entry = (desc_status >> 16) & 0x7ff;
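                    /* The Rx completion word packs the packet length into its
                       low 16 bits (hence the truncation above) and the
                       originating ring entry into bits 16-26. */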
1449
1450                if (debug > 4)
1451                        printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1452                /* Decide whether the packet is small enough to be worth copying
1453                   into a minimally-sized skbuff, leaving the ring buffer in place. */
1454                if (pkt_len < rx_copybreak
1455                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1456                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
1457                        pci_dma_sync_single_for_cpu(np->pci_dev,
1458                                                    np->rx_info[entry].mapping,
1459                                                    pkt_len, PCI_DMA_FROMDEVICE);
1460                        skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1461                        pci_dma_sync_single_for_device(np->pci_dev,
1462                                                       np->rx_info[entry].mapping,
1463                                                       pkt_len, PCI_DMA_FROMDEVICE);
1464                        skb_put(skb, pkt_len);
1465                } else {
1466                        pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1467                        skb = np->rx_info[entry].skb;
1468                        skb_put(skb, pkt_len);
1469                        np->rx_info[entry].skb = NULL;
1470                        np->rx_info[entry].mapping = 0;
1471                }
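                    /* Copy-break trade-off: small packets are copied into a
                       right-sized skb so the large ring buffer stays mapped
                       and reusable; the sync_for_cpu/sync_for_device pair
                       hands the buffer to the CPU for the copy and back to
                       the device afterwards. Bigger packets go up in place. */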
1472#ifndef final_version                   /* Remove after testing. */
1473                /* You will want this info for the initial debug. */
1474                if (debug > 5) {
1475                        DECLARE_MAC_BUF(mac);
1476                        DECLARE_MAC_BUF(mac2);
1477
1478                        printk(KERN_DEBUG "  Rx data %s %s"
1479                               " %2.2x%2.2x.\n",
1480                               print_mac(mac, &skb->data[0]),
1481                               print_mac(mac2, &skb->data[6]),
1482                               skb->data[12], skb->data[13]);
1483                }
1484#endif
1485
1486                skb->protocol = eth_type_trans(skb, dev);
1487#ifdef VLAN_SUPPORT
1488                if (debug > 4)
1489                        printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1490#endif
1491                if (le16_to_cpu(desc->status2) & 0x0100) {
1492                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1493                        np->stats.rx_compressed++;
1494                }
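                    /* rx_compressed has no meaning for Ethernet; it is reused
                       here to count frames whose checksum the hardware
                       verified. */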
1495                /*
1496                 * This feature doesn't seem to be working, at least
1497                 * with the two firmware versions I have. If the GFP sees
1498                 * an IP fragment, it either ignores it completely, or reports
1499                 * "bad checksum" on it.
1500                 *
1501                 * Maybe I missed something -- corrections are welcome.
1502                 * Until then, the printk stays. :-) -Ion
1503                 */
1504                else if (le16_to_cpu(desc->status2) & 0x0040) {
1505                        skb->ip_summed = CHECKSUM_COMPLETE;
1506                        skb->csum = le16_to_cpu(desc->csum);
1507                        printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1508                }
1509#ifdef VLAN_SUPPORT
1510                if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1511                        if (debug > 4)
1512                                printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
1513                        /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
1514                        vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
1515                } else
1516#endif /* VLAN_SUPPORT */
1517                        netdev_receive_skb(skb);
1518                dev->last_rx = jiffies;
1519                np->stats.rx_packets++;
1520
1521        next_rx:
1522                np->cur_rx++;
1523                desc->status = 0;
1524                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1525        }
1526        writew(np->rx_done, np->base + CompletionQConsumerIdx);
1527
1528 out:
1529        refill_rx_ring(dev);
1530        if (debug > 5)
1531                printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1532                       retcode, np->rx_done, desc_status);
1533        return retcode;
1534}
1535
1536
1537#ifdef HAVE_NETDEV_POLL
1538static int netdev_poll(struct napi_struct *napi, int budget)
1539{
1540        struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1541        struct net_device *dev = np->dev;
1542        u32 intr_status;
1543        void __iomem *ioaddr = np->base;
1544        int quota = budget;
1545
1546        do {
1547                writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1548
1549                if (__netdev_rx(dev, &quota))
1550                        goto out;
1551
1552                intr_status = readl(ioaddr + IntrStatus);
1553        } while (intr_status & (IntrRxDone | IntrRxEmpty));
1554
1555        netif_rx_complete(dev, napi);
1556        intr_status = readl(ioaddr + IntrEnable);
1557        intr_status |= IntrRxDone | IntrRxEmpty;
1558        writel(intr_status, ioaddr + IntrEnable);
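            /* Note the re-arm order: polling finishes only once IntrStatus
               shows no further Rx work, and only then are the Rx sources
               added back to IntrEnable. Returning less than the full budget
               tells the NAPI core that this device is done polling. */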
1559
1560 out:
1561        if (debug > 5)
1562                printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1563                       budget - quota);
1564
1565        /* XXX: restart the Rx engine here if it has stopped. */
1566        return budget - quota;
1567}
1568#endif /* HAVE_NETDEV_POLL */
1569
1570
1571static void refill_rx_ring(struct net_device *dev)
1572{
1573        struct netdev_private *np = netdev_priv(dev);
1574        struct sk_buff *skb;
1575        int entry = -1;
1576
1577        /* Refill the Rx ring buffers. */
1578        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1579                entry = np->dirty_rx % RX_RING_SIZE;
1580                if (np->rx_info[entry].skb == NULL) {
1581                        skb = dev_alloc_skb(np->rx_buf_sz);
1582                        np->rx_info[entry].skb = skb;
1583                        if (skb == NULL)
1584                                break;  /* Better luck next round. */
1585                        np->rx_info[entry].mapping =
1586                                pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1587                        skb->dev = dev; /* Mark as being used by this device. */
1588                        np->rx_ring[entry].rxaddr =
1589                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1590                }
1591                if (entry == RX_RING_SIZE - 1)
1592                        np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1593        }
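            /* RxDescEndRing on the final slot marks the ring's wrap point for
               the chip's descriptor fetcher; writing the index of the last
               refilled slot to RxDescQIdx below advertises the new producer
               position. */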
1594        if (entry >= 0)
1595                writew(entry, np->base + RxDescQIdx);
1596}
1597
1598
1599static void netdev_media_change(struct net_device *dev)
1600{
1601        struct netdev_private *np = netdev_priv(dev);
1602        void __iomem *ioaddr = np->base;
1603        u16 reg0, reg1, reg4, reg5;
1604        u32 new_tx_mode;
1605        u32 new_intr_timer_ctrl;
1606
1607        /* Dummy reads first: the BMSR link bit is latched, so this clears stale status before the live values are sampled. */
1608        mdio_read(dev, np->phys[0], MII_BMCR);
1609        mdio_read(dev, np->phys[0], MII_BMSR);
1610
1611        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1612        reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1613
1614        if (reg1 & BMSR_LSTATUS) {
1615                /* link is up */
1616                if (reg0 & BMCR_ANENABLE) {
1617                        /* autonegotiation is enabled */
1618                        reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1619                        reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1620                        if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1621                                np->speed100 = 1;
1622                                np->mii_if.full_duplex = 1;
1623                        } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1624                                np->speed100 = 1;
1625                                np->mii_if.full_duplex = 0;
1626                        } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1627                                np->speed100 = 0;
1628                                np->mii_if.full_duplex = 1;
1629                        } else {
1630                                np->speed100 = 0;
1631                                np->mii_if.full_duplex = 0;
1632                        }
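                            /* i.e. resolve autonegotiation to the best mode
                               both partners advertise, in the usual priority
                               order: 100/full, 100/half, 10/full, 10/half. */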
1633                } else {
1634                        /* autonegotiation is disabled */
1635                        if (reg0 & BMCR_SPEED100)
1636                                np->speed100 = 1;
1637                        else
1638                                np->speed100 = 0;
1639                        if (reg0 & BMCR_FULLDPLX)
1640                                np->mii_if.full_duplex = 1;
1641                        else
1642                                np->mii_if.full_duplex = 0;
1643                }
1644                netif_carrier_on(dev);
1645                printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1646                       dev->name,
1647                       np->speed100 ? "100" : "10",
1648                       np->mii_if.full_duplex ? "full" : "half");
1649
1650                new_tx_mode = np->tx_mode & ~FullDuplex;        /* duplex setting */
1651                if (np->mii_if.full_duplex)
1652                        new_tx_mode |= FullDuplex;
1653                if (np->tx_mode != new_tx_mode) {
1654                        np->tx_mode = new_tx_mode;
1655                        writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1656                        udelay(1000);
1657                        writel(np->tx_mode, ioaddr + TxMode);
1658                }
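                    /* A duplex change appears to require pulsing MiiSoftReset:
                       write the new mode with the reset bit set, give the MII
                       port ~1ms to settle, then write the mode alone. */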
1659
1660                new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1661                if (np->speed100)
1662                        new_intr_timer_ctrl |= Timer10X;
1663                if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1664                        np->intr_timer_ctrl = new_intr_timer_ctrl;
1665                        writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1666                }
1667        } else {
1668                netif_carrier_off(dev);
1669                printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1670        }
1671}
1672
1673
1674static void netdev_error(struct net_device *dev, int intr_status)
1675{
1676        struct netdev_private *np = netdev_priv(dev);
1677
1678        /* Came close to underrunning the Tx FIFO, increase threshold. */
1679        if (intr_status & IntrTxDataLow) {
1680                if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1681                        writel(++np->tx_threshold, np->base + TxThreshold);
1682                        printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1683                               dev->name, np->tx_threshold * 16);
1684                } else
1685                        printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1686        }
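            /* tx_threshold is kept in 16-byte units (note the *16 in the
               message above) and is never raised past PKT_BUF_SZ/16, i.e.
               one full packet buffer. */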
1687        if (intr_status & IntrRxGFPDead) {
1688                np->stats.rx_fifo_errors++;
1689                np->stats.rx_errors++;
1690        }
1691        if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1692                np->stats.tx_fifo_errors++;
1693                np->stats.tx_errors++;
1694        }
1695        if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary |
                                 IntrLinkChange | IntrStatsMax | IntrTxDataLow |
                                 IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1696                printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1697                       dev->name, intr_status);
1698}
1699
1700
1701static struct net_device_stats *get_stats(struct net_device *dev)
1702{
1703        struct netdev_private *np = netdev_priv(dev);
1704        void __iomem *ioaddr = np->base;
1705
1706        /* This adapter architecture needs no SMP locks. */
1707        np->stats.tx_bytes = readl(ioaddr + 0x57010);
1708        np->stats.rx_bytes = readl(ioaddr + 0x57044);
1709        np->stats.tx_packets = readl(ioaddr + 0x57000);
1710        np->stats.tx_aborted_errors =
1711                readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1712        np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1713        np->stats.collisions =
1714                readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1715
1716        /* The chip only needs to report frames it silently dropped. */
1717        np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1718        writew(0, ioaddr + RxDMAStatus);
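            /* The silently-dropped count is accumulated and the register then
               cleared; it is only 16 bits wide, so letting it free-run
               between reads would risk a wrap. */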
1719        np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1720        np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1721        np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1722        np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1723
1724        return &np->stats;
1725}
1726
1727
1728static void set_rx_mode(struct net_device *dev)
1729{
1730        struct netdev_private *np = netdev_priv(dev);
1731        void __iomem *ioaddr = np->base;
1732        u32 rx_mode = MinVLANPrio;
1733        struct dev_mc_list *mclist;
1734        int i;
1735#ifdef VLAN_SUPPORT
1736
1737        rx_mode |= VlanMode;
1738        if (np->vlgrp) {
1739                int vlan_count = 0;
1740                void __iomem *filter_addr = ioaddr + HashTable + 8;
1741                for (i = 0; i < VLAN_VID_MASK; i++) {
1742                        if (vlan_group_get_device(np->vlgrp, i)) {
1743                                if (vlan_count >= 32)
1744                                        break;
1745                                writew(i, filter_addr);
1746                                filter_addr += 16;
1747                                vlan_count++;
1748                        }
1749                }
1750                if (i == VLAN_VID_MASK) {
1751                        rx_mode |= PerfectFilterVlan;
1752                        while (vlan_count < 32) {
1753                                writew(0, filter_addr);
1754                                filter_addr += 16;
1755                                vlan_count++;
1756                        }
1757                }
1758        }
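            /* If every configured VLAN id fit into the 32 perfect-filter
               slots (the scan above ran to completion), strict VLAN filtering
               is enabled and the unused slots are zeroed; otherwise the chip
               is left accepting all VLAN ids. */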
1759#endif /* VLAN_SUPPORT */
1760
1761        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1762                rx_mode |= AcceptAll;
1763        } else if ((dev->mc_count > multicast_filter_limit)
1764                   || (dev->flags & IFF_ALLMULTI)) {
1765                /* Too many to match, or accept all multicasts. */
1766                rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1767        } else if (dev->mc_count <= 14) {
1768                /* Use the 16 element perfect filter, skip first two entries. */
1769                void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1770                __be16 *eaddrs;
1771                for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1772                     i++, mclist = mclist->next) {
1773                        eaddrs = (__be16 *)mclist->dmi_addr;
1774                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1775                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1776                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1777                }
1778                eaddrs = (__be16 *)dev->dev_addr;
1779                while (i++ < 16) {
1780                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1781                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1782                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1783                }
1784                rx_mode |= AcceptBroadcast|PerfectFilter;
1785        } else {
1786                /* Must use a multicast hash table. */
1787                void __iomem *filter_addr;
1788                __be16 *eaddrs;
1789                __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));   /* Multicast hash filter */
1790
1791                memset(mc_filter, 0, sizeof(mc_filter));
1792                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1793                     i++, mclist = mclist->next) {
1794                        /* The chip uses the upper 9 CRC bits
1795                           as index into the hash table */
1796                        int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1797                        __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1798
1799                        *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1800                }
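                    /* Worked example: a CRC of 0x9abcdef0 gives bit_nr =
                       0x9abcdef0 >> 23 = 0x135 (309), which sets bit
                       309 & 31 = 21 of the __le32 overlaying
                       mc_filter[(309 >> 4) & ~1] = mc_filter[18] -- that is,
                       bit 309 of the 512-bit table. */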
1801                /* Clear the perfect filter list, skip first two entries. */
1802                filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1803                eaddrs = (__be16 *)dev->dev_addr;
1804                for (i = 2; i < 16; i++) {
1805                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1806                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1807                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1808                }
1809                for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1810                        writew(mc_filter[i], filter_addr);
1811                rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1812        }
1813        writel(rx_mode, ioaddr + RxFilterMode);
1814}
1815
1816static int check_if_running(struct net_device *dev)
1817{
1818        if (!netif_running(dev))
1819                return -EINVAL;
1820        return 0;
1821}
1822
1823static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1824{
1825        struct netdev_private *np = netdev_priv(dev);
1826        strcpy(info->driver, DRV_NAME);
1827        strcpy(info->version, DRV_VERSION);
1828        strcpy(info->bus_info, pci_name(np->pci_dev));
1829}
1830
1831static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1832{
1833        struct netdev_private *np = netdev_priv(dev);
1834        spin_lock_irq(&np->lock);
1835        mii_ethtool_gset(&np->mii_if, ecmd);
1836        spin_unlock_irq(&np->lock);
1837        return 0;
1838}
1839
1840static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1841{
1842        struct netdev_private *np = netdev_priv(dev);
1843        int res;
1844        spin_lock_irq(&np->lock);
1845        res = mii_ethtool_sset(&np->mii_if, ecmd);
1846        spin_unlock_irq(&np->lock);
1847        check_duplex(dev);
1848        return res;
1849}
1850
1851static int nway_reset(struct net_device *dev)
1852{
1853        struct netdev_private *np = netdev_priv(dev);
1854        return mii_nway_restart(&np->mii_if);
1855}
1856
1857static u32 get_link(struct net_device *dev)
1858{
1859        struct netdev_private *np = netdev_priv(dev);
1860        return mii_link_ok(&np->mii_if);
1861}
1862
1863static u32 get_msglevel(struct net_device *dev)
1864{
1865        return debug;
1866}
1867
1868static void set_msglevel(struct net_device *dev, u32 val)
1869{
1870        debug = val;
1871}
1872
1873static const struct ethtool_ops ethtool_ops = {
1874        .begin = check_if_running,
1875        .get_drvinfo = get_drvinfo,
1876        .get_settings = get_settings,
1877        .set_settings = set_settings,
1878        .nway_reset = nway_reset,
1879        .get_link = get_link,
1880        .get_msglevel = get_msglevel,
1881        .set_msglevel = set_msglevel,
1882};
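/* Because .begin is wired to check_if_running(), every ethtool operation
   above fails with -EINVAL while the interface is down. */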
1883
1884static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1885{
1886        struct netdev_private *np = netdev_priv(dev);
1887        struct mii_ioctl_data *data = if_mii(rq);
1888        int rc;
1889
1890        if (!netif_running(dev))
1891                return -EINVAL;
1892
1893        spin_lock_irq(&np->lock);
1894        rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1895        spin_unlock_irq(&np->lock);
1896
1897        if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1898                check_duplex(dev);
1899
1900        return rc;
1901}
1902
1903static int netdev_close(struct net_device *dev)
1904{
1905        struct netdev_private *np = netdev_priv(dev);
1906        void __iomem *ioaddr = np->base;
1907        int i;
1908
1909        netif_stop_queue(dev);
1910#ifdef HAVE_NETDEV_POLL
1911        napi_disable(&np->napi);
1912#endif
1913
1914        if (debug > 1) {
1915                printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1916                           dev->name, (int) readl(ioaddr + IntrStatus));
1917                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1918                       dev->name, np->cur_tx, np->dirty_tx,
1919                       np->cur_rx, np->dirty_rx);
1920        }
1921
1922        /* Disable interrupts by clearing the interrupt mask. */
1923        writel(0, ioaddr + IntrEnable);
1924
1925        /* Stop the chip's Tx and Rx processes. */
1926        writel(0, ioaddr + GenCtrl);
1927        readl(ioaddr + GenCtrl);
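            /* The read-back flushes the posted write, ensuring the stop
               command has actually reached the chip before teardown
               continues. */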
1928
1929        if (debug > 5) {
1930                printk(KERN_DEBUG "  Tx ring at %#llx:\n",
1931                       (long long) np->tx_ring_dma);
1932                for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1933                        printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1934                               i, le32_to_cpu(np->tx_ring[i].status),
1935                               (long long) dma_to_cpu(np->tx_ring[i].addr),
1936                               le32_to_cpu(np->tx_done_q[i].status));
1937                printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1938                       (long long) np->rx_ring_dma, np->rx_done_q);
1939                if (np->rx_done_q)
1940                        for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1941                                printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1942                                       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1943                        }
1944        }
1945
1946        free_irq(dev->irq, dev);
1947
1948        /* Free all the skbuffs in the Rx queue. */
1949        for (i = 0; i < RX_RING_SIZE; i++) {
1950                np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1951                if (np->rx_info[i].skb != NULL) {
1952                        pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1953                        dev_kfree_skb(np->rx_info[i].skb);
1954                }
1955                np->rx_info[i].skb = NULL;
1956                np->rx_info[i].mapping = 0;
1957        }
1958        for (i = 0; i < TX_RING_SIZE; i++) {
1959                struct sk_buff *skb = np->tx_info[i].skb;
1960                if (skb == NULL)
1961                        continue;
1962                pci_unmap_single(np->pci_dev,
1963                                 np->tx_info[i].mapping,
1964                                 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1965                np->tx_info[i].mapping = 0;
1966                dev_kfree_skb(skb);
1967                np->tx_info[i].skb = NULL;
1968        }
1969
1970        return 0;
1971}
1972
1973#ifdef CONFIG_PM
1974static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
1975{
1976        struct net_device *dev = pci_get_drvdata(pdev);
1977
1978        if (netif_running(dev)) {
1979                netif_device_detach(dev);
1980                netdev_close(dev);
1981        }
1982
1983        pci_save_state(pdev);
1984        pci_set_power_state(pdev, pci_choose_state(pdev,state));
1985
1986        return 0;
1987}
1988
1989static int starfire_resume(struct pci_dev *pdev)
1990{
1991        struct net_device *dev = pci_get_drvdata(pdev);
1992
1993        pci_set_power_state(pdev, PCI_D0);
1994        pci_restore_state(pdev);
1995
1996        if (netif_running(dev)) {
1997                netdev_open(dev);
1998                netif_device_attach(dev);
1999        }
2000
2001        return 0;
2002}
2003#endif /* CONFIG_PM */
2004
2005
2006static void __devexit starfire_remove_one (struct pci_dev *pdev)
2007{
2008        struct net_device *dev = pci_get_drvdata(pdev);
2009        struct netdev_private *np = netdev_priv(dev);
2010
2011        BUG_ON(!dev);
2012
2013        unregister_netdev(dev);
2014
2015        if (np->queue_mem)
2016                pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2017
2018
2019        /* XXX: add wakeup code -- requires firmware for MagicPacket */
2020        pci_set_power_state(pdev, PCI_D3hot);   /* go to sleep in D3 mode */
2021        pci_disable_device(pdev);
2022
2023        iounmap(np->base);
2024        pci_release_regions(pdev);
2025
2026        pci_set_drvdata(pdev, NULL);
2027        free_netdev(dev);                       /* Will also free np!! */
2028}
2029
2030
2031static struct pci_driver starfire_driver = {
2032        .name           = DRV_NAME,
2033        .probe          = starfire_init_one,
2034        .remove         = __devexit_p(starfire_remove_one),
2035#ifdef CONFIG_PM
2036        .suspend        = starfire_suspend,
2037        .resume         = starfire_resume,
2038#endif /* CONFIG_PM */
2039        .id_table       = starfire_pci_tbl,
2040};
2041
2042
2043static int __init starfire_init (void)
2044{
2045/* When built as a module, this banner is printed whether or not any devices are found during probe. */
2046#ifdef MODULE
2047        printk(version);
2048#ifdef HAVE_NETDEV_POLL
2049        printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2050#else
2051        printk(KERN_INFO DRV_NAME ": polling (NAPI) disabled\n");
2052#endif
2053#endif
2054
2055        /* we can do this test only at run-time... sigh */
2056        if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2057                printk(KERN_ERR DRV_NAME ": this driver has dma_addr_t issues, please send email to the maintainer\n");
2058                return -ENODEV;
2059        }
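            /* The ring descriptors store DMA addresses as netdrv_addr_t; if
               its width ever diverges from the kernel's dma_addr_t, the
               descriptor layout would be wrong, so refuse to load. */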
2060
2061        return pci_register_driver(&starfire_driver);
2062}
2063
2064
2065static void __exit starfire_cleanup (void)
2066{
2067        pci_unregister_driver (&starfire_driver);
2068}
2069
2070
2071module_init(starfire_init);
2072module_exit(starfire_cleanup);
2073
2074
2075/*
2076 * Local variables:
2077 *  c-basic-offset: 8
2078 *  tab-width: 8
2079 * End:
2080 */
2081