/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html
        [link no longer provides useful info -jgarzik]

*/
  28
  29#define DRV_NAME        "starfire"
  30#define DRV_VERSION     "2.1"
  31#define DRV_RELDATE     "July  6, 2008"
  32
  33#include <linux/module.h>
  34#include <linux/kernel.h>
  35#include <linux/pci.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/init.h>
  39#include <linux/delay.h>
  40#include <linux/crc32.h>
  41#include <linux/ethtool.h>
  42#include <linux/mii.h>
  43#include <linux/if_vlan.h>
  44#include <linux/mm.h>
  45#include <linux/firmware.h>
  46#include <asm/processor.h>              /* Processor type for cache alignment. */
  47#include <asm/uaccess.h>
  48#include <asm/io.h>
  49
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;
#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2 byte boundary. Thus we always force copying of packets, as the
 * starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries, i.e. 4 KB for Tx and
   8 or 16 KB for Rx, depending on the completion descriptor type. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names */
#define FIRMWARE_RX     "adaptec/starfire_rx.bin"
#define FIRMWARE_TX     "adaptec/starfire_tx.bin"

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64.  For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally.  Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
        { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] __devinitdata = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 vlanid;
        __le16 status2;
};
struct csum_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 csum;                    /* Partial checksum */
        __le16 status2;
};
struct full_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 status3;
        __le16 status2;
        __le16 vlanid;
        __le16 csum;                    /* partial checksum */
        __le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 reserved;
        __le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        __le32 status;                  /* timestamp, index. */
#if 0
        __le32 intrstatus;              /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct napi_struct napi;
        struct net_device *dev;
        struct net_device_stats stats;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        struct vlan_group *vlgrp;
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* Number of MII PHYs found. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev);
static void     init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      __netdev_rx(struct net_device *dev, int *quota);
static int      netdev_poll(struct napi_struct *napi, int budget);
static void     refill_rx_ring(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 2)
                printk(KERN_DEBUG "%s: Setting vlgrp to %p\n", dev->name, grp);
        np->vlgrp = grp;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk(KERN_DEBUG "%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk(KERN_DEBUG "%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        vlan_group_set_device(np->vlgrp, vid, NULL);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */


static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_get_stats          = get_stats,
        .ndo_set_multicast_list = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef VLAN_SUPPORT
        .ndo_vlan_rx_register   = netdev_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = netdev_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = netdev_vlan_rx_kill_vid,
#endif
};

static int __devinit starfire_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct netdev_private *np;
        int i, irq, option, chip_idx = ent->driver_data;
        struct net_device *dev;
        static int card_idx = -1;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        card_idx++;

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev) {
                printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
                return -ENOMEM;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, DRV_NAME)) {
                printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
                goto err_out_free_netdev;
        }

        base = ioremap(ioaddr, io_size);
        if (!base) {
                printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
                        card_idx, io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
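        /* The station address is stored in reverse byte order at EEPROM
           offsets 15-20, as the loop below implies. */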
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk(KERN_ERR "%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        dev->base_addr = (unsigned long)base;
        dev->irq = irq;

        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        option = card_idx < MAX_UNITS ? options[card_idx] : 0;
        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits of 'option' are the media type; bit 9
           (0x200) forces full duplex. */
        if (option & 0x200)
                np->mii_if.full_duplex = 1;

        if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;
        else
                np->mii_if.force_media = 0;
        np->speed100 = 1;

        /* Timer resolution is 128 * 0.8us = 102.4us per tick; intr_latency
           is in microseconds, so * 10 / 1024 below approximates / 102.4. */
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk(KERN_WARNING "Adjusting small_frames down to 512\n");
                        break;
                }
        }

        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);

        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
               dev->name, netdrv_tbl[chip_idx].name, base,
               dev->dev_addr, irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        mdelay(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk(KERN_ERR "%s: PHY#%d reset never completed!\n", dev->name, phy);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "%#4.4x advertising %#4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        iounmap(base);
err_out_free_res:
        pci_release_regions (pdev);
err_out_free_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        int result, boguscnt=1000;
        /* Busy-wait until the read cycle completes (top two status bits
           read 10) or the counter expires. */
        do {
                result = readl(mdio_addr);
        } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
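        /* An all-ones value means no PHY is responding at this address. */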
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        const struct firmware *fw_rx, *fw_tx;
        const __be32 *fw_rx_data, *fw_tx_data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int i, retval;
        size_t tx_size, rx_size;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, dev->irq);

        /* Allocate the various queues. */
        if (!np->queue_mem) {
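                /* Round each region up to a multiple of QUEUE_ALIGN so every
                   queue starts on the 256-byte boundary the chip requires. */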
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
                        free_irq(dev->irq, dev);
                        return -ENOMEM;
                }

                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

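        /* All the queues live in the one allocation, so they share the same
           upper 32 address bits; the double 16-bit shift keeps the
           expression valid when dma_addr_t is only 32 bits wide. */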
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
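        /* Load the station address into perfect-filter entries 1-15: three
           big-endian 16-bit words per 16-byte slot, most significant word
           written last. */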
        for (i = 1; i < 16; i++) {
                __be16 *eaddrs = (__be16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);
        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

        napi_enable(&np->napi);

        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
        set_rx_mode(dev);

        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        /* Enable PCI interrupts. */
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
        /* Set VLAN type to 802.1q */
        writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

        retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_RX);
                return retval;
        }
        if (fw_rx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_rx->size, FIRMWARE_RX);
                retval = -EINVAL;
                goto out_rx;
        }
        retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_TX);
                goto out_rx;
        }
        if (fw_tx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_tx->size, FIRMWARE_TX);
                retval = -EINVAL;
                goto out_tx;
        }
        fw_rx_data = (const __be32 *)&fw_rx->data[0];
        fw_tx_data = (const __be32 *)&fw_tx->data[0];
        rx_size = fw_rx->size / 4;
        tx_size = fw_tx->size / 4;

        /* Load Rx/Tx firmware into the frame processors */
        for (i = 0; i < rx_size; i++)
                writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < tx_size; i++)
                writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
        if (enable_hw_cksum)
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
        else
                /* Enable the Rx and Tx units only. */
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
                       dev->name);

out_tx:
        release_firmware(fw_tx);
out_rx:
        release_firmware(fw_rx);
        return retval;
}


static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u16 reg0;
        int silly_count = 1000;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                /* do nothing */;
        if (!silly_count) {
                printk(KERN_ERR "%s: MII reset failed!\n", dev->name);
                return;
        }

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (!np->mii_if.force_media) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->mii_if.full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int old_debug;

        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

        /* Perhaps we should reinitialize the hardware here. */

        /*
         * Stop and restart the interface.
         * Cheat and increase the debug level temporarily.
         */
        old_debug = debug;
        debug = 2;
        netdev_close(dev);
        netdev_open(dev);
        debug = old_debug;

        /* Trigger an immediate transmit demand. */

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = np->reap_tx = 0;
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb->dev = dev;                 /* Mark as being used by this device. */
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
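        /* Point the hardware Rx queue index at the last valid descriptor. */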
        writew(i - 1, np->base + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for (  ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++)
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        u32 status;
        int i;

        /*
         * be cautious here, wrapping the queue has weird semantics
         * and we may not have enough slots even when it seems we do.
         */
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
                        return NETDEV_TX_OK;
        }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
                status = TxDescID;

                if (i == 0) {
                        np->tx_info[entry].skb = skb;
                        status |= TxCRCEn;
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
                                status |= TxRingWrap;
                                wrap_ring = 1;
                        }
                        if (np->reap_tx) {
                                status |= TxDescIntr;
                                np->reap_tx = 0;
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                status |= TxCalTCP;
                                np->stats.tx_compressed++;
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                } else {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= this_frag->size;
                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* scavenge the tx descriptors twice per TX_RING_SIZE */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
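        /* The chip counts the producer index in 8-byte units, so a 16-byte
           (64-bit) descriptor advances it by two per slot. */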
        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;
        int handled = 0;

        do {
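                /* Reading IntrClear returns the pending interrupt sources
                   and acknowledges them in the same access. */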
1331                u32 intr_status = readl(ioaddr + IntrClear);
1332
1333                if (debug > 4)
1334                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1335                               dev->name, intr_status);
1336
1337                if (intr_status == 0 || intr_status == (u32) -1)
1338                        break;
1339
1340                handled = 1;
1341
1342                if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1343                        u32 enable;
1344
1345                        if (likely(napi_schedule_prep(&np->napi))) {
1346                                __napi_schedule(&np->napi);
1347                                enable = readl(ioaddr + IntrEnable);
1348                                enable &= ~(IntrRxDone | IntrRxEmpty);
1349                                writel(enable, ioaddr + IntrEnable);
1350                                /* flush PCI posting buffers */
1351                                readl(ioaddr + IntrEnable);
1352                        } else {
1353                                /* Paranoia check */
1354                                enable = readl(ioaddr + IntrEnable);
1355                                if (enable & (IntrRxDone | IntrRxEmpty)) {
1356                                        printk(KERN_INFO
1357                                               "%s: interrupt while in poll!\n",
1358                                               dev->name);
1359                                        enable &= ~(IntrRxDone | IntrRxEmpty);
1360                                        writel(enable, ioaddr + IntrEnable);
1361                                }
1362                        }
1363                }
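                /*
                 * This is the usual NAPI pattern, in sketch form:
                 *   irq:  mask IntrRxDone|IntrRxEmpty, __napi_schedule()
                 *   poll: __netdev_rx() until done or out of quota, then
                 *         napi_complete() and unmask the Rx bits
                 * (see netdev_poll() below).  Masking keeps Rx processing
                 * out of hard-irq context while the poll is pending.
                 */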
1364
1365                /* Scavenge the skbuff list based on the Tx-done queue.
1366                   There are redundant checks here that may be cleaned up
1367                   after the driver has proven to be reliable. */
1368                consumer = readl(ioaddr + TxConsumerIdx);
1369                if (debug > 3)
1370                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1371                               dev->name, consumer);
1372
1373                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1374                        if (debug > 3)
1375                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1376                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
1377                        if ((tx_status & 0xe0000000) == 0xa0000000) {
1378                                np->stats.tx_packets++;
1379                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
1380                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1381                                struct sk_buff *skb = np->tx_info[entry].skb;
1382                                np->tx_info[entry].skb = NULL;
1383                                pci_unmap_single(np->pci_dev,
1384                                                 np->tx_info[entry].mapping,
1385                                                 skb_first_frag_len(skb),
1386                                                 PCI_DMA_TODEVICE);
1387                                np->tx_info[entry].mapping = 0;
1388                                np->dirty_tx += np->tx_info[entry].used_slots;
1389                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1390                                {
1391                                        int i;
1392                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1393                                                pci_unmap_single(np->pci_dev,
1394                                                                 np->tx_info[entry].mapping,
1395                                                                 skb_shinfo(skb)->frags[i].size,
1396                                                                 PCI_DMA_TODEVICE);
1397                                                np->dirty_tx++;
1398                                                entry++;
1399                                        }
1400                                }
1401
1402                                dev_kfree_skb_irq(skb);
1403                        }
1404                        np->tx_done_q[np->tx_done].status = 0;
1405                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1406                }
1407                writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
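                /*
                 * CompletionQConsumerIdx presumably packs the Rx consumer
                 * into its low 16 bits and the Tx consumer into its high
                 * 16 bits; the 16-bit write at offset +2 updates only the
                 * Tx half.
                 */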
1408
1409                if (netif_queue_stopped(dev) &&
1410                    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1411                        /* The ring is no longer full, wake the queue. */
1412                        netif_wake_queue(dev);
1413                }
1414
1415                /* Stats overflow */
1416                if (intr_status & IntrStatsMax)
1417                        get_stats(dev);
1418
1419                /* Media change interrupt. */
1420                if (intr_status & IntrLinkChange)
1421                        netdev_media_change(dev);
1422
1423                /* Abnormal error summary / uncommon event handling. */
1424                if (intr_status & IntrAbnormalSummary)
1425                        netdev_error(dev, intr_status);
1426
1427                if (--boguscnt < 0) {
1428                        if (debug > 1)
1429                                printk(KERN_WARNING "%s: Too much work at interrupt, "
1430                                       "status=%#8.8x.\n",
1431                                       dev->name, intr_status);
1432                        break;
1433                }
1434        } while (1);
1435
1436        if (debug > 4)
1437                printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1438                       dev->name, (int) readl(ioaddr + IntrStatus));
1439        return IRQ_RETVAL(handled);
1440}
1441
1442
1443/*
1444 * This routine is logically part of the interrupt/poll handler, but separated
1445 * for clarity and better register allocation.
1446 */
1447static int __netdev_rx(struct net_device *dev, int *quota)
1448{
1449        struct netdev_private *np = netdev_priv(dev);
1450        u32 desc_status;
1451        int retcode = 0;
1452
1453        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1454        while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1455                struct sk_buff *skb;
1456                u16 pkt_len;
1457                int entry;
1458                rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1459
1460                if (debug > 4)
1461                        printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1462                if (!(desc_status & RxOK)) {
1463                        /* There was an error. */
1464                        if (debug > 2)
1465                                printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1466                        np->stats.rx_errors++;
1467                        if (desc_status & RxFIFOErr)
1468                                np->stats.rx_fifo_errors++;
1469                        goto next_rx;
1470                }
1471
1472                if (*quota <= 0) {      /* out of rx quota */
1473                        retcode = 1;
1474                        goto out;
1475                }
1476                (*quota)--;
1477
1478                pkt_len = desc_status;  /* implicitly truncate to 16 bits */
1479                entry = (desc_status >> 16) & 0x7ff;
1480
1481                if (debug > 4)
1482                        printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1483                /* Check if the packet is long enough to accept without copying
1484                   to a minimally-sized skbuff. */
1485                if (pkt_len < rx_copybreak
1486                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1487                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
1488                        pci_dma_sync_single_for_cpu(np->pci_dev,
1489                                                    np->rx_info[entry].mapping,
1490                                                    pkt_len, PCI_DMA_FROMDEVICE);
1491                        skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1492                        pci_dma_sync_single_for_device(np->pci_dev,
1493                                                       np->rx_info[entry].mapping,
1494                                                       pkt_len, PCI_DMA_FROMDEVICE);
1495                        skb_put(skb, pkt_len);
1496                } else {
1497                        pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1498                        skb = np->rx_info[entry].skb;
1499                        skb_put(skb, pkt_len);
1500                        np->rx_info[entry].skb = NULL;
1501                        np->rx_info[entry].mapping = 0;
1502                }
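                /*
                 * Copybreak tradeoff, illustrated: a frame shorter than
                 * rx_copybreak is copied into a small fresh skb so the
                 * mapped DMA buffer can be reused in place, while a larger
                 * frame is unmapped and passed up whole, leaving a hole
                 * for refill_rx_ring() to fill with a new buffer.
                 */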
1503#ifndef final_version                   /* Remove after testing. */
1504                /* You will want this info for the initial debug. */
1505                if (debug > 5) {
1506                        printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
1507                               skb->data, skb->data + 6,
1508                               skb->data[12], skb->data[13]);
1509                }
1510#endif
1511
1512                skb->protocol = eth_type_trans(skb, dev);
1513#ifdef VLAN_SUPPORT
1514                if (debug > 4)
1515                        printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1516#endif
1517                if (le16_to_cpu(desc->status2) & 0x0100) {
1518                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1519                        np->stats.rx_compressed++;
1520                }
1521                /*
1522                 * This feature doesn't seem to be working, at least
1523                 * with the two firmware versions I have. If the GFP sees
1524                 * an IP fragment, it either ignores it completely, or reports
1525                 * "bad checksum" on it.
1526                 *
1527                 * Maybe I missed something -- corrections are welcome.
1528                 * Until then, the printk stays. :-) -Ion
1529                 */
1530                else if (le16_to_cpu(desc->status2) & 0x0040) {
1531                        skb->ip_summed = CHECKSUM_COMPLETE;
1532                        skb->csum = le16_to_cpu(desc->csum);
1533                        printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1534                }
1535#ifdef VLAN_SUPPORT
1536                if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1537                        u16 vlid = le16_to_cpu(desc->vlanid);
1538
1539                        if (debug > 4) {
1540                                printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
1541                                       vlid);
1542                        }
1543                        /*
1544                         * vlan_hwaccel_rx expects a packet with the VLAN tag
1545                         * stripped out.
1546                         */
1547                        vlan_hwaccel_rx(skb, np->vlgrp, vlid);
1548                } else
1549#endif /* VLAN_SUPPORT */
1550                        netif_receive_skb(skb);
1551                np->stats.rx_packets++;
1552
1553        next_rx:
1554                np->cur_rx++;
1555                desc->status = 0;
1556                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1557        }
1558
1559        if (*quota == 0) {      /* out of rx quota */
1560                retcode = 1;
1561                goto out;
1562        }
1563        writew(np->rx_done, np->base + CompletionQConsumerIdx);
1564
1565 out:
1566        refill_rx_ring(dev);
1567        if (debug > 5)
1568                printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1569                       retcode, np->rx_done, desc_status);
1570        return retcode;
1571}
1572
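/*
 * NAPI contract reminder: returning a value smaller than the budget means
 * the poll is finished and napi_complete() has been called; returning the
 * full budget keeps this handler scheduled for another pass.
 */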
1573static int netdev_poll(struct napi_struct *napi, int budget)
1574{
1575        struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1576        struct net_device *dev = np->dev;
1577        u32 intr_status;
1578        void __iomem *ioaddr = np->base;
1579        int quota = budget;
1580
1581        do {
1582                writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1583
1584                if (__netdev_rx(dev, &quota))
1585                        goto out;
1586
1587                intr_status = readl(ioaddr + IntrStatus);
1588        } while (intr_status & (IntrRxDone | IntrRxEmpty));
1589
1590        napi_complete(napi);
1591        intr_status = readl(ioaddr + IntrEnable);
1592        intr_status |= IntrRxDone | IntrRxEmpty;
1593        writel(intr_status, ioaddr + IntrEnable);
1594
1595 out:
1596        if (debug > 5)
1597                printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1598                       budget - quota);
1599
1600        /* Restart Rx engine if stopped. */
1601        return budget - quota;
1602}
1603
1604static void refill_rx_ring(struct net_device *dev)
1605{
1606        struct netdev_private *np = netdev_priv(dev);
1607        struct sk_buff *skb;
1608        int entry = -1;
1609
1610        /* Refill the Rx ring buffers. */
1611        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1612                entry = np->dirty_rx % RX_RING_SIZE;
1613                if (np->rx_info[entry].skb == NULL) {
1614                        skb = dev_alloc_skb(np->rx_buf_sz);
1615                        np->rx_info[entry].skb = skb;
1616                        if (skb == NULL)
1617                                break;  /* Better luck next round. */
1618                        np->rx_info[entry].mapping =
1619                                pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1620                        skb->dev = dev; /* Mark as being used by this device. */
1621                        np->rx_ring[entry].rxaddr =
1622                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1623                }
1624                if (entry == RX_RING_SIZE - 1)
1625                        np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1626        }
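        /*
         * RxDescQIdx is presumably the Rx descriptor producer index:
         * writing the last refilled slot tells the chip how far into the
         * ring it may deliver packets.
         */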
1627        if (entry >= 0)
1628                writew(entry, np->base + RxDescQIdx);
1629}
1630
1631
1632static void netdev_media_change(struct net_device *dev)
1633{
1634        struct netdev_private *np = netdev_priv(dev);
1635        void __iomem *ioaddr = np->base;
1636        u16 reg0, reg1, reg4, reg5;
1637        u32 new_tx_mode;
1638        u32 new_intr_timer_ctrl;
1639
1640        /* dummy reads to clear any latched PHY status bits */
1641        mdio_read(dev, np->phys[0], MII_BMCR);
1642        mdio_read(dev, np->phys[0], MII_BMSR);
1643
1644        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1645        reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1646
1647        if (reg1 & BMSR_LSTATUS) {
1648                /* link is up */
1649                if (reg0 & BMCR_ANENABLE) {
1650                        /* autonegotiation is enabled */
1651                        reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1652                        reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1653                        if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1654                                np->speed100 = 1;
1655                                np->mii_if.full_duplex = 1;
1656                        } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1657                                np->speed100 = 1;
1658                                np->mii_if.full_duplex = 0;
1659                        } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1660                                np->speed100 = 0;
1661                                np->mii_if.full_duplex = 1;
1662                        } else {
1663                                np->speed100 = 0;
1664                                np->mii_if.full_duplex = 0;
1665                        }
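                        /*
                         * The tests above run from fastest to slowest, so
                         * the best mode advertised by both ends wins: e.g.
                         * ADVERTISE_100FULL and LPA_100FULL both set yields
                         * 100 Mbit full duplex even when the slower modes
                         * are also on offer.
                         */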
1666                } else {
1667                        /* autonegotiation is disabled */
1668                        if (reg0 & BMCR_SPEED100)
1669                                np->speed100 = 1;
1670                        else
1671                                np->speed100 = 0;
1672                        if (reg0 & BMCR_FULLDPLX)
1673                                np->mii_if.full_duplex = 1;
1674                        else
1675                                np->mii_if.full_duplex = 0;
1676                }
1677                netif_carrier_on(dev);
1678                printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1679                       dev->name,
1680                       np->speed100 ? "100" : "10",
1681                       np->mii_if.full_duplex ? "full" : "half");
1682
1683                new_tx_mode = np->tx_mode & ~FullDuplex;        /* duplex setting */
1684                if (np->mii_if.full_duplex)
1685                        new_tx_mode |= FullDuplex;
1686                if (np->tx_mode != new_tx_mode) {
1687                        np->tx_mode = new_tx_mode;
1688                        writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1689                        udelay(1000);
1690                        writel(np->tx_mode, ioaddr + TxMode);
1691                }
1692
1693                new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1694                if (np->speed100)
1695                        new_intr_timer_ctrl |= Timer10X;
1696                if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1697                        np->intr_timer_ctrl = new_intr_timer_ctrl;
1698                        writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1699                }
1700        } else {
1701                netif_carrier_off(dev);
1702                printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1703        }
1704}
1705
1706
1707static void netdev_error(struct net_device *dev, int intr_status)
1708{
1709        struct netdev_private *np = netdev_priv(dev);
1710
1711        /* Came close to underrunning the Tx FIFO, increase threshold. */
1712        if (intr_status & IntrTxDataLow) {
1713                if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1714                        writel(++np->tx_threshold, np->base + TxThreshold);
1715                        printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1716                               dev->name, np->tx_threshold * 16);
1717                } else
1718                        printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1719        }
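        /*
         * The threshold register counts 16-byte units (note the "* 16" in
         * the printk above), so each bump buys another 16 bytes of Tx FIFO
         * headroom, up to roughly one full PKT_BUF_SZ packet.
         */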
1720        if (intr_status & IntrRxGFPDead) {
1721                np->stats.rx_fifo_errors++;
1722                np->stats.rx_errors++;
1723        }
1724        if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1725                np->stats.tx_fifo_errors++;
1726                np->stats.tx_errors++;
1727        }
1728        if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1729                printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1730                       dev->name, intr_status);
1731}
1732
1733
1734static struct net_device_stats *get_stats(struct net_device *dev)
1735{
1736        struct netdev_private *np = netdev_priv(dev);
1737        void __iomem *ioaddr = np->base;
1738
1739        /* This adapter architecture needs no SMP locks. */
1740        np->stats.tx_bytes = readl(ioaddr + 0x57010);
1741        np->stats.rx_bytes = readl(ioaddr + 0x57044);
1742        np->stats.tx_packets = readl(ioaddr + 0x57000);
1743        np->stats.tx_aborted_errors =
1744                readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1745        np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1746        np->stats.collisions =
1747                readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1748
1749        /* The chip only needs to report frames it silently dropped. */
1750        np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1751        writew(0, ioaddr + RxDMAStatus);
1752        np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1753        np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1754        np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1755        np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1756
1757        return &np->stats;
1758}
1759
1760
1761static void set_rx_mode(struct net_device *dev)
1762{
1763        struct netdev_private *np = netdev_priv(dev);
1764        void __iomem *ioaddr = np->base;
1765        u32 rx_mode = MinVLANPrio;
1766        struct dev_mc_list *mclist;
1767        int i;
1768#ifdef VLAN_SUPPORT
1769
1770        rx_mode |= VlanMode;
1771        if (np->vlgrp) {
1772                int vlan_count = 0;
1773                void __iomem *filter_addr = ioaddr + HashTable + 8;
1774                for (i = 0; i < VLAN_VID_MASK; i++) {
1775                        if (vlan_group_get_device(np->vlgrp, i)) {
1776                                if (vlan_count >= 32)
1777                                        break;
1778                                writew(i, filter_addr);
1779                                filter_addr += 16;
1780                                vlan_count++;
1781                        }
1782                }
1783                if (i == VLAN_VID_MASK) {
1784                        rx_mode |= PerfectFilterVlan;
1785                        while (vlan_count < 32) {
1786                                writew(0, filter_addr);
1787                                filter_addr += 16;
1788                                vlan_count++;
1789                        }
1790                }
1791        }
1792#endif /* VLAN_SUPPORT */
1793
1794        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1795                rx_mode |= AcceptAll;
1796        } else if ((dev->mc_count > multicast_filter_limit)
1797                   || (dev->flags & IFF_ALLMULTI)) {
1798                /* Too many to match, or accept all multicasts. */
1799                rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1800        } else if (dev->mc_count <= 14) {
1801                /* Use the 16 element perfect filter, skip first two entries. */
1802                void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1803                __be16 *eaddrs;
1804                for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1805                     i++, mclist = mclist->next) {
1806                        eaddrs = (__be16 *)mclist->dmi_addr;
1807                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1808                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1809                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1810                }
1811                eaddrs = (__be16 *)dev->dev_addr;
1812                while (i++ < 16) {
1813                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1814                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1815                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1816                }
1817                rx_mode |= AcceptBroadcast|PerfectFilter;
1818        } else {
1819                /* Must use a multicast hash table. */
1820                void __iomem *filter_addr;
1821                __be16 *eaddrs;
1822                __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));   /* Multicast hash filter */
1823
1824                memset(mc_filter, 0, sizeof(mc_filter));
1825                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1826                     i++, mclist = mclist->next) {
1827                        /* The chip uses the upper 9 CRC bits
1828                           as index into the hash table */
1829                        int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1830                        __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1831
1832                        *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1833                }
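                /*
                 * Worked example: a CRC whose top 9 bits give bit number
                 * 300 lands in the 32-bit word spanning mc_filter[18] and
                 * mc_filter[19] (300 >> 4 = 18, rounded down to an even
                 * index) with bit 300 & 31 = 12 set -- hash bucket 300 of
                 * the 512.
                 */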
1834                /* Clear the perfect filter list, skip first two entries. */
1835                filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1836                eaddrs = (__be16 *)dev->dev_addr;
1837                for (i = 2; i < 16; i++) {
1838                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1839                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1840                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1841                }
1842                for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1843                        writew(mc_filter[i], filter_addr);
1844                rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1845        }
1846        writel(rx_mode, ioaddr + RxFilterMode);
1847}
1848
1849static int check_if_running(struct net_device *dev)
1850{
1851        if (!netif_running(dev))
1852                return -EINVAL;
1853        return 0;
1854}
1855
1856static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1857{
1858        struct netdev_private *np = netdev_priv(dev);
1859        strcpy(info->driver, DRV_NAME);
1860        strcpy(info->version, DRV_VERSION);
1861        strcpy(info->bus_info, pci_name(np->pci_dev));
1862}
1863
1864static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1865{
1866        struct netdev_private *np = netdev_priv(dev);
1867        spin_lock_irq(&np->lock);
1868        mii_ethtool_gset(&np->mii_if, ecmd);
1869        spin_unlock_irq(&np->lock);
1870        return 0;
1871}
1872
1873static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1874{
1875        struct netdev_private *np = netdev_priv(dev);
1876        int res;
1877        spin_lock_irq(&np->lock);
1878        res = mii_ethtool_sset(&np->mii_if, ecmd);
1879        spin_unlock_irq(&np->lock);
1880        check_duplex(dev);
1881        return res;
1882}
1883
1884static int nway_reset(struct net_device *dev)
1885{
1886        struct netdev_private *np = netdev_priv(dev);
1887        return mii_nway_restart(&np->mii_if);
1888}
1889
1890static u32 get_link(struct net_device *dev)
1891{
1892        struct netdev_private *np = netdev_priv(dev);
1893        return mii_link_ok(&np->mii_if);
1894}
1895
1896static u32 get_msglevel(struct net_device *dev)
1897{
1898        return debug;
1899}
1900
1901static void set_msglevel(struct net_device *dev, u32 val)
1902{
1903        debug = val;
1904}
1905
1906static const struct ethtool_ops ethtool_ops = {
1907        .begin = check_if_running,
1908        .get_drvinfo = get_drvinfo,
1909        .get_settings = get_settings,
1910        .set_settings = set_settings,
1911        .nway_reset = nway_reset,
1912        .get_link = get_link,
1913        .get_msglevel = get_msglevel,
1914        .set_msglevel = set_msglevel,
1915};
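/*
 * Note that .begin runs before every operation above: check_if_running()
 * returns -EINVAL on a downed interface, so none of the individual handlers
 * need their own netif_running() test.
 */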
1916
1917static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1918{
1919        struct netdev_private *np = netdev_priv(dev);
1920        struct mii_ioctl_data *data = if_mii(rq);
1921        int rc;
1922
1923        if (!netif_running(dev))
1924                return -EINVAL;
1925
1926        spin_lock_irq(&np->lock);
1927        rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1928        spin_unlock_irq(&np->lock);
1929
1930        if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1931                check_duplex(dev);
1932
1933        return rc;
1934}
1935
1936static int netdev_close(struct net_device *dev)
1937{
1938        struct netdev_private *np = netdev_priv(dev);
1939        void __iomem *ioaddr = np->base;
1940        int i;
1941
1942        netif_stop_queue(dev);
1943
1944        napi_disable(&np->napi);
1945
1946        if (debug > 1) {
1947                printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1948                           dev->name, (int) readl(ioaddr + IntrStatus));
1949                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1950                       dev->name, np->cur_tx, np->dirty_tx,
1951                       np->cur_rx, np->dirty_rx);
1952        }
1953
1954        /* Disable interrupts by clearing the interrupt mask. */
1955        writel(0, ioaddr + IntrEnable);
1956
1957        /* Stop the chip's Tx and Rx processes. */
1958        writel(0, ioaddr + GenCtrl);
1959        readl(ioaddr + GenCtrl);
1960
1961        if (debug > 5) {
1962                printk(KERN_DEBUG "  Tx ring at %#llx:\n",
1963                       (long long) np->tx_ring_dma);
1964                for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1965                        printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1966                               i, le32_to_cpu(np->tx_ring[i].status),
1967                               (long long) dma_to_cpu(np->tx_ring[i].addr),
1968                               le32_to_cpu(np->tx_done_q[i].status));
1969                printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1970                       (long long) np->rx_ring_dma, np->rx_done_q);
1971                if (np->rx_done_q)
1972                        for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1973                                printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1974                                       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1975                        }
1976        }
1977
1978        free_irq(dev->irq, dev);
1979
1980        /* Free all the skbuffs in the Rx queue. */
1981        for (i = 0; i < RX_RING_SIZE; i++) {
1982                np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1983                if (np->rx_info[i].skb != NULL) {
1984                        pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1985                        dev_kfree_skb(np->rx_info[i].skb);
1986                }
1987                np->rx_info[i].skb = NULL;
1988                np->rx_info[i].mapping = 0;
1989        }
1990        for (i = 0; i < TX_RING_SIZE; i++) {
1991                struct sk_buff *skb = np->tx_info[i].skb;
1992                if (skb == NULL)
1993                        continue;
1994                pci_unmap_single(np->pci_dev,
1995                                 np->tx_info[i].mapping,
1996                                 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1997                np->tx_info[i].mapping = 0;
1998                dev_kfree_skb(skb);
1999                np->tx_info[i].skb = NULL;
2000        }
2001
2002        return 0;
2003}
2004
2005#ifdef CONFIG_PM
2006static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
2007{
2008        struct net_device *dev = pci_get_drvdata(pdev);
2009
2010        if (netif_running(dev)) {
2011                netif_device_detach(dev);
2012                netdev_close(dev);
2013        }
2014
2015        pci_save_state(pdev);
2016        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2017
2018        return 0;
2019}
2020
2021static int starfire_resume(struct pci_dev *pdev)
2022{
2023        struct net_device *dev = pci_get_drvdata(pdev);
2024
2025        pci_set_power_state(pdev, PCI_D0);
2026        pci_restore_state(pdev);
2027
2028        if (netif_running(dev)) {
2029                netdev_open(dev);
2030                netif_device_attach(dev);
2031        }
2032
2033        return 0;
2034}
2035#endif /* CONFIG_PM */
2036
2037
2038static void __devexit starfire_remove_one (struct pci_dev *pdev)
2039{
2040        struct net_device *dev = pci_get_drvdata(pdev);
2041        struct netdev_private *np = netdev_priv(dev);
2042
2043        BUG_ON(!dev);
2044
2045        unregister_netdev(dev);
2046
2047        if (np->queue_mem)
2048                pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2049
2050
2051        /* XXX: add wakeup code -- requires firmware for MagicPacket */
2052        pci_set_power_state(pdev, PCI_D3hot);   /* go to sleep in D3 mode */
2053        pci_disable_device(pdev);
2054
2055        iounmap(np->base);
2056        pci_release_regions(pdev);
2057
2058        pci_set_drvdata(pdev, NULL);
2059        free_netdev(dev);                       /* Will also free np!! */
2060}
2061
2062
2063static struct pci_driver starfire_driver = {
2064        .name           = DRV_NAME,
2065        .probe          = starfire_init_one,
2066        .remove         = __devexit_p(starfire_remove_one),
2067#ifdef CONFIG_PM
2068        .suspend        = starfire_suspend,
2069        .resume         = starfire_resume,
2070#endif /* CONFIG_PM */
2071        .id_table       = starfire_pci_tbl,
2072};
2073
2074
2075static int __init starfire_init (void)
2076{
2077/* When built as a module, this is printed whether or not devices are found in probe. */
2078#ifdef MODULE
2079        printk(version);
2080
2081        printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2082#endif
2083
2084        /* we can do this test only at run-time... sigh */
2085        if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2086                printk(KERN_ERR "This driver has dma_addr_t issues, please send email to maintainer\n");
2087                return -ENODEV;
2088        }
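        /*
         * netdrv_addr_t (defined near the top of this file) is the type of
         * the descriptor address fields; if its width ever diverged from
         * dma_addr_t, DMA addresses would be truncated or padded in the
         * rings, so refusing to load is the safe choice.
         */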
2089
2090        return pci_register_driver(&starfire_driver);
2091}
2092
2093
2094static void __exit starfire_cleanup (void)
2095{
2096        pci_unregister_driver (&starfire_driver);
2097}
2098
2099
2100module_init(starfire_init);
2101module_exit(starfire_cleanup);
2102
2103
2104/*
2105 * Local variables:
2106 *  c-basic-offset: 8
2107 *  tab-width: 8
2108 * End:
2109 */
2110