linux/drivers/net/ethernet/adaptec/starfire.c
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "starfire"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "July  6, 2008"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
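/*
 * With PADDING_MASK == 3, for example, a 61-byte frame is padded up to
 * (61 + 3) & ~3 == 64 bytes before being handed to the firmware; see the
 * skb_padto() call in start_tx() below.
 */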

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2-byte boundary. Thus we always force copying of packets, as
 * the Starfire doesn't allow misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
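/*
 * The value is programmed into the Rx/Tx DMA control registers in what
 * appear to be 32-byte units; see the (DMA_BURST_SIZE / 32) shifts in
 * netdev_open() below.
 */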

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB depending on the entry size. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
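/* skb_num_frags() counts the linear header as one fragment of its own,
   in addition to the skb's page fragments. */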

/* Firmware names */
#define FIRMWARE_RX     "adaptec/starfire_rx.bin"
#define FIRMWARE_TX     "adaptec/starfire_tx.bin"

/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/
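/*
 * A concrete sketch of the IIId scheme: with TX_RING_SIZE == 32 and the
 * hardcoded threshold of 4, start_tx() stops the queue once
 * (np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE, and the interrupt
 * handler wakes it again when that sum drops below TX_RING_SIZE.  Both
 * indices are free-running; only their difference matters.
 */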



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
        { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 vlanid;
        __le16 status2;
};
struct csum_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 csum;                    /* Partial checksum */
        __le16 status2;
};
struct full_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 status3;
        __le16 status2;
        __le16 vlanid;
        __le16 csum;                    /* partial checksum */
        __le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 reserved;
        __le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        __le32 status;                  /* timestamp, index. */
#if 0
        __le32 intrstatus;              /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct napi_struct napi;
        struct net_device *dev;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* Number of MII PHYs found. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev);
static void     init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      __netdev_rx(struct net_device *dev, int *quota);
static int      netdev_poll(struct napi_struct *napi, int budget);
static void     refill_rx_ring(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev,
                                  __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}

static int netdev_vlan_rx_kill_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}
#endif /* VLAN_SUPPORT */


static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef VLAN_SUPPORT
        .ndo_vlan_rx_add_vid    = netdev_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = netdev_vlan_rx_kill_vid,
#endif
};

static int starfire_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        struct device *d = &pdev->dev;
        struct netdev_private *np;
        int i, irq, chip_idx = ent->driver_data;
        struct net_device *dev;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                dev_err(d, "no PCI MEM resources, aborting\n");
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, DRV_NAME)) {
                dev_err(d, "cannot reserve PCI resources, aborting\n");
                goto err_out_free_netdev;
        }

        base = ioremap(ioaddr, io_size);
        if (!base) {
                dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
                        io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
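        /* The station address is apparently stored byte-reversed, hence
           the descending 20 - i offsets in the loop below. */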
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;
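        /* That is, one timer tick is 102.4us, so microseconds convert to
           ticks as intr_latency / 102.4 == (intr_latency * 10) / 1024. */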

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk("Adjusting small_frames down to 512\n");
                        break;
                }
        }

        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);

        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
               dev->name, netdrv_tbl[chip_idx].name, base,
               dev->dev_addr, irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        mdelay(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "%#4.4x advertising %#4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        iounmap(base);
err_out_free_res:
        pci_release_regions (pdev);
err_out_free_netdev:
        free_netdev(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        int result, boguscnt=1000;
        /* ??? Should we add a busy-wait here? */
        do {
                result = readl(mdio_addr);
        } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        const struct firmware *fw_rx, *fw_tx;
        const __be32 *fw_rx_data, *fw_tx_data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        const int irq = np->pci_dev->irq;
        int i, retval;
        size_t tx_size, rx_size;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, irq);

        /* Allocate the various queues. */
        if (!np->queue_mem) {
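                /* All four queues share a single DMA-coherent allocation;
                   each piece is rounded up to QUEUE_ALIGN so that carving
                   the block up preserves the 256-byte alignment. */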
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == NULL) {
                        free_irq(irq, dev);
                        return -ENOMEM;
                }

                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

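        /* Shifting twice by 16 extracts the upper 32 address bits without
           a >> 32, which would be undefined when dma_addr_t is 32 bits. */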
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
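        /* Each of the 16 perfect-filter entries is 16 bytes wide and holds
           the station address as three 16-bit words, one per 32-bit slot. */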
        for (i = 1; i < 16; i++) {
                __be16 *eaddrs = (__be16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);
        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

        napi_enable(&np->napi);

        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
        set_rx_mode(dev);

        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        /* Enable PCI interrupts. */
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
        /* Set VLAN type to 802.1q */
        writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

        retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_RX);
                goto out_init;
        }
        if (fw_rx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_rx->size, FIRMWARE_RX);
                retval = -EINVAL;
                goto out_rx;
        }
        retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_TX);
                goto out_rx;
        }
        if (fw_tx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_tx->size, FIRMWARE_TX);
                retval = -EINVAL;
                goto out_tx;
        }
        fw_rx_data = (const __be32 *)&fw_rx->data[0];
        fw_tx_data = (const __be32 *)&fw_tx->data[0];
        rx_size = fw_rx->size / 4;
        tx_size = fw_tx->size / 4;

        /* Load Rx/Tx firmware into the frame processors */
        for (i = 0; i < rx_size; i++)
                writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < tx_size; i++)
                writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
        if (enable_hw_cksum)
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
        else
                /* Enable the Rx and Tx units only. */
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
                       dev->name);

out_tx:
        release_firmware(fw_tx);
out_rx:
        release_firmware(fw_rx);
out_init:
        if (retval)
                netdev_close(dev);
        return retval;
}


static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u16 reg0;
        int silly_count = 1000;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                /* do nothing */;
        if (!silly_count) {
                printk("%s: MII reset failed!\n", dev->name);
                return;
        }

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (!np->mii_if.force_media) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->mii_if.full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int old_debug;

        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

        /* Perhaps we should reinitialize the hardware here. */

        /*
         * Stop and restart the interface.
         * Cheat and increase the debug level temporarily.
         */
        old_debug = debug;
        debug = 2;
        netdev_close(dev);
        netdev_open(dev);
        debug = old_debug;

        /* Trigger an immediate transmit demand. */

        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = np->reap_tx = 0;
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
        writew(i - 1, np->base + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for (  ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++)
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        u32 status;
        int i;

        /*
         * be cautious here, wrapping the queue has weird semantics
         * and we may not have enough slots even when it seems we do.
         */
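        /* The factor of two below leaves slack for the extra slots a frame
           can consume when it wraps the ring; the wrap descriptor claims
           all remaining slots (see the wrap_ring handling further down). */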
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
                        return NETDEV_TX_OK;
        }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
                status = TxDescID;

                if (i == 0) {
                        np->tx_info[entry].skb = skb;
                        status |= TxCRCEn;
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
                                status |= TxRingWrap;
                                wrap_ring = 1;
                        }
                        if (np->reap_tx) {
                                status |= TxDescIntr;
                                np->reap_tx = 0;
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                status |= TxCalTCP;
                                dev->stats.tx_compressed++;
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                } else {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= skb_frag_size(this_frag);
                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev,
                                               skb_frag_address(this_frag),
                                               skb_frag_size(this_frag),
                                               PCI_DMA_TODEVICE);
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* scavenge the tx descriptors twice per TX_RING_SIZE */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
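        /* The producer index is counted in 8-byte units, hence the
           sizeof() / 8 scaling: type-1 descriptors are 8 bytes, type-2
           descriptors are 16. */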
        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        return NETDEV_TX_OK;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;
        int handled = 0;

        do {
                u32 intr_status = readl(ioaddr + IntrClear);

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
                               dev->name, intr_status);

                if (intr_status == 0 || intr_status == (u32) -1)
                        break;

                handled = 1;

                if (intr_status & (IntrRxDone | IntrRxEmpty)) {
                        u32 enable;

                        if (likely(napi_schedule_prep(&np->napi))) {
                                __napi_schedule(&np->napi);
                                enable = readl(ioaddr + IntrEnable);
                                enable &= ~(IntrRxDone | IntrRxEmpty);
                                writel(enable, ioaddr + IntrEnable);
                                /* flush PCI posting buffers */
                                readl(ioaddr + IntrEnable);
                        } else {
                                /* Paranoia check */
                                enable = readl(ioaddr + IntrEnable);
                                if (enable & (IntrRxDone | IntrRxEmpty)) {
                                        printk(KERN_INFO
                                               "%s: interrupt while in poll!\n",
                                               dev->name);
                                        enable &= ~(IntrRxDone | IntrRxEmpty);
                                        writel(enable, ioaddr + IntrEnable);
                                }
                        }
                }

                /* Scavenge the skbuff list based on the Tx-done queue.
                   There are redundant checks here that may be cleaned up
                   after the driver has proven to be reliable. */
                consumer = readl(ioaddr + TxConsumerIdx);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                               dev->name, consumer);

                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                        if (debug > 3)
                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
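                        /*
                         * The completion type is in the top three status
                         * bits: 101 appears to mark a frame fully
                         * transmitted, while 100 carries the ring index
                         * whose buffers are to be unmapped (inferred from
                         * the handling below).
                         */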
1335                        if ((tx_status & 0xe0000000) == 0xa0000000) {
1336                                dev->stats.tx_packets++;
1337                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
1338                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1339                                struct sk_buff *skb = np->tx_info[entry].skb;
1340                                np->tx_info[entry].skb = NULL;
1341                                pci_unmap_single(np->pci_dev,
1342                                                 np->tx_info[entry].mapping,
1343                                                 skb_first_frag_len(skb),
1344                                                 PCI_DMA_TODEVICE);
1345                                np->tx_info[entry].mapping = 0;
1346                                np->dirty_tx += np->tx_info[entry].used_slots;
1347                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1348                                {
1349                                        int i;
1350                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1351                                                pci_unmap_single(np->pci_dev,
1352                                                                 np->tx_info[entry].mapping,
1353                                                                 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1354                                                                 PCI_DMA_TODEVICE);
1355                                                np->dirty_tx++;
1356                                                entry++;
1357                                        }
1358                                }
1359
1360                                dev_kfree_skb_irq(skb);
1361                        }
1362                        np->tx_done_q[np->tx_done].status = 0;
1363                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1364                }
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

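		/* Leave a few slots of headroom so the queue isn't woken
		   only to stop again on the very next packet. */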
		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}


/*
 * This routine is logically part of the interrupt/poll handler, but separated
 * for clarity and better register allocation.
 */
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			dev->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				dev->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {	/* out of rx quota */
			retcode = 1;
			goto out;
		}
		(*quota)--;

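		/* The status word packs the packet length into its low
		   16 bits and the Rx ring entry index into bits 16-26. */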
		pkt_len = desc_status;	/* implicitly truncated to 16 bits */
		entry = (desc_status >> 16) & 0x7ff;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
		if (pkt_len < rx_copybreak &&
		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);	/* 16 byte align the IP header */
			pci_dma_sync_single_for_cpu(np->pci_dev,
						    np->rx_info[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			pci_dma_sync_single_for_device(np->pci_dev,
						       np->rx_info[entry].mapping,
						       pkt_len, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version			/* Remove after testing. */
		/* You will want this info for the initial debug. */
		if (debug > 5) {
			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
			       skb->data, skb->data + 6,
			       skb->data[12], skb->data[13]);
		}
#endif

		skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
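		/* status2 bit 0x0100 means the chip verified the TCP/UDP
		   checksum; rx_compressed is borrowed here as a counter
		   for such frames. */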
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
#ifdef VLAN_SUPPORT
		if (le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

			if (debug > 4) {
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
				       vlid);
			}
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
		}
#endif /* VLAN_SUPPORT */
		netif_receive_skb(skb);
		dev->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}

	if (*quota == 0) {	/* out of rx quota */
		retcode = 1;
		goto out;
	}
	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}

static int netdev_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	u32 intr_status;
	void __iomem *ioaddr = np->base;
	int quota = budget;

	do {
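		/* Acknowledge the Rx sources before polling, then re-check
		   IntrStatus below so an event arriving mid-poll is never
		   lost. */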
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		if (__netdev_rx(dev, &quota))
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	napi_complete(napi);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
		       budget - quota);

	/* Restart Rx engine if stopped. */
	return budget - quota;
}

static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
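	/* Hand the index of the last initialized descriptor to the chip. */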
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}


static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* The BMSR link-status bit is latched low: the first pair of reads
	   clears any stale link-fail indication, the second pair returns
	   the current state. */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
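		/* A duplex change requires pulsing MiiSoftReset in TxMode:
		   write the new mode with the reset bit set, wait, then
		   clear the reset bit. */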
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}


static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary |
			     IntrLinkChange | IntrStatsMax | IntrTxDataLow |
			     IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

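	/* The counters below live in the chip's statistics register block
	   at 0x57000; each offset reads one hardware counter. */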
	/* This adapter architecture needs no SMP locks. */
	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip reports only silently dropped frames; accumulate them
	   here, then clear the hardware counter. */
	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}

#ifdef VLAN_SUPPORT
static u32 set_vlan_mode(struct netdev_private *np)
{
	u32 ret = VlanMode;
	u16 vid;
	void __iomem *filter_addr = np->base + HashTable + 8;
	int vlan_count = 0;

	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
		if (vlan_count == 32)
			break;
		writew(vid, filter_addr);
		filter_addr += 16;
		vlan_count++;
	}
	if (vlan_count == 32)
		ret |= PerfectFilterVlan;
	/* Zero any unused VLAN filter entries. */
	while (vlan_count < 32) {
		writew(0, filter_addr);
		filter_addr += 16;
		vlan_count++;
	}
	return ret;
}
#endif /* VLAN_SUPPORT */

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;

#ifdef VLAN_SUPPORT
	rx_mode |= set_vlan_mode(np);
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		__be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
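		/* Fill the unused perfect-filter entries with the station
		   address so they can never match a foreign packet. */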
		eaddrs = (__be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		__be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (__be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0);	/* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

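	/* Detach first so the stack stops using the device before the
	   rings are torn down. */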
	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */


static void starfire_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);			/* Will also free np!! */
}


static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= starfire_remove_one,
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif /* CONFIG_PM */
	.id_table	= starfire_pci_tbl,
};


static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);

	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

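	/* netdrv_addr_t must be exactly as wide as dma_addr_t; catch a
	   mismatched configuration at compile time. */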
	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

	return pci_register_driver(&starfire_driver);
}


static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);


/*
 * Local variables:
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */