/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been heavily modified from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The information below comes from Donald Becker's original driver:

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html
        [link no longer provides useful info -jgarzik]

*/

#define DRV_NAME        "starfire"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
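/*
 * The transmit path rounds such frames up to the next 32-bit boundary,
 * e.g. skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK).
 */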
#endif

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer. */
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2-byte boundary. Thus we always force copying of packets, as the
 * starfire doesn't allow misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
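/* The "+ 1" counts the linear header area as the first fragment. */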

/* Firmware names */
#define FIRMWARE_RX     "adaptec/starfire_rx.bin"
#define FIRMWARE_TX     "adaptec/starfire_tx.bin"

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static const struct pci_device_id starfire_pci_tbl[] = {
        { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 vlanid;
        __le16 status2;
};
struct csum_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 csum;                    /* Partial checksum */
        __le16 status2;
};
struct full_rx_done_desc {
        __le32 status;                  /* Low 16 bits is length. */
        __le16 status3;
        __le16 status2;
        __le16 vlanid;
        __le16 csum;                    /* partial checksum */
        __le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        __le32 status;                  /* Upper bits are status, lower 16 length. */
        __le32 reserved;
        __le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        __le32 status;                  /* timestamp, index. */
#if 0
        __le32 intrstatus;              /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct napi_struct napi;
        struct net_device *dev;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* Number of MII PHYs found. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
        void __iomem *base;
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev, unsigned int txqueue);
static void     init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      __netdev_rx(struct net_device *dev, int *quota);
static int      netdev_poll(struct napi_struct *napi, int budget);
static void     refill_rx_ring(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev,
                                  __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}

static int netdev_vlan_rx_kill_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);

        return 0;
}
#endif /* VLAN_SUPPORT */


static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_eth_ioctl          = netdev_ioctl,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef VLAN_SUPPORT
        .ndo_vlan_rx_add_vid    = netdev_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = netdev_vlan_rx_kill_vid,
#endif
};

static int starfire_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        struct device *d = &pdev->dev;
        struct netdev_private *np;
        int i, irq, chip_idx = ent->driver_data;
        struct net_device *dev;
        long ioaddr;
        void __iomem *base;
        int drv_flags, io_size;
        int boguscnt;

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                dev_err(d, "no PCI MEM resources, aborting\n");
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &pdev->dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, DRV_NAME)) {
                dev_err(d, "cannot reserve PCI resources, aborting\n");
                goto err_out_free_netdev;
        }

        base = ioremap(ioaddr, io_size);
        if (!base) {
                dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
                        io_size, ioaddr);
                goto err_out_free_res;
        }

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(base + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, base + TxMode);
        udelay(1000);
        writel(0, base + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, base + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(base + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        np = netdev_priv(dev);
        np->dev = dev;
        np->base = base;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
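        /* (intr_latency * 10) / 1024 below thus converts microseconds into
           ~102.4us timer ticks. */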
 746        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
 747                Timer10X | EnableIntrMasking;
 748
 749        if (small_frames > 0) {
 750                np->intr_timer_ctrl |= SmallFrameBypass;
 751                switch (small_frames) {
 752                case 1 ... 64:
 753                        np->intr_timer_ctrl |= SmallFrame64;
 754                        break;
 755                case 65 ... 128:
 756                        np->intr_timer_ctrl |= SmallFrame128;
 757                        break;
 758                case 129 ... 256:
 759                        np->intr_timer_ctrl |= SmallFrame256;
 760                        break;
 761                default:
 762                        np->intr_timer_ctrl |= SmallFrame512;
 763                        if (small_frames > 512)
 764                                printk("Adjusting small_frames down to 512\n");
 765                        break;
 766                }
 767        }
 768
 769        dev->netdev_ops = &netdev_ops;
 770        dev->watchdog_timeo = TX_TIMEOUT;
 771        dev->ethtool_ops = &ethtool_ops;
 772
 773        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
 774
 775        if (mtu)
 776                dev->mtu = mtu;
 777
 778        if (register_netdev(dev))
 779                goto err_out_cleardev;
 780
 781        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 782               dev->name, netdrv_tbl[chip_idx].name, base,
 783               dev->dev_addr, irq);
 784
 785        if (drv_flags & CanHaveMII) {
 786                int phy, phy_idx = 0;
 787                int mii_status;
 788                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
 789                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
 790                        msleep(100);
 791                        boguscnt = 1000;
 792                        while (--boguscnt > 0)
 793                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
 794                                        break;
 795                        if (boguscnt == 0) {
 796                                printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
 797                                continue;
 798                        }
 799                        mii_status = mdio_read(dev, phy, MII_BMSR);
 800                        if (mii_status != 0) {
 801                                np->phys[phy_idx++] = phy;
 802                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
 803                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
 804                                           "%#4.4x advertising %#4.4x.\n",
 805                                           dev->name, phy, mii_status, np->mii_if.advertising);
 806                                /* there can be only one PHY on-board */
 807                                break;
 808                        }
 809                }
 810                np->phy_cnt = phy_idx;
 811                if (np->phy_cnt > 0)
 812                        np->mii_if.phy_id = np->phys[0];
 813                else
 814                        memset(&np->mii_if, 0, sizeof(np->mii_if));
 815        }
 816
 817        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
 818               dev->name, enable_hw_cksum ? "enabled" : "disabled");
 819        return 0;
 820
 821err_out_cleardev:
 822        iounmap(base);
 823err_out_free_res:
 824        pci_release_regions (pdev);
 825err_out_free_netdev:
 826        free_netdev(dev);
 827        return -ENODEV;
 828}
 829
 830
 831/* Read the MII Management Data I/O (MDIO) interfaces. */
 832static int mdio_read(struct net_device *dev, int phy_id, int location)
 833{
 834        struct netdev_private *np = netdev_priv(dev);
 835        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
 836        int result, boguscnt=1000;
 837        /* ??? Should we add a busy-wait here? */
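        /* Spin until the read completes: the two top bits presumably encode
           busy/valid state, so wait for the register to read back as
           10xx...b (valid, not busy). */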
        do {
                result = readl(mdio_addr);
        } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        const struct firmware *fw_rx, *fw_tx;
        const __be32 *fw_rx_data, *fw_tx_data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        const int irq = np->pci_dev->irq;
        int i, retval;
        size_t tx_size, rx_size;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (retval)
                return retval;

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, irq);

        /* Allocate the various queues. */
        if (!np->queue_mem) {
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
                                                   np->queue_mem_size,
                                                   &np->queue_mem_dma, GFP_ATOMIC);
                if (np->queue_mem == NULL) {
                        free_irq(irq, dev);
                        return -ENOMEM;
                }

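                /* Carve the one coherent allocation into the four queues, in
                   order: Tx completion, Rx completion, Tx ring, Rx ring.  The
                   first three sizes were rounded up to QUEUE_ALIGN above, so
                   every region stays 256-byte aligned. */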
                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

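        /*
         * Writing the high address bits as (x >> 16) >> 16 avoids an
         * undefined shift by 32 when dma_addr_t is only 32 bits wide;
         * in that case these registers simply receive zero.
         */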
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
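        /* Each of the 15 remaining filter slots is 16 bytes wide; the three
           16-bit words of the station address are written in reverse order. */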
        for (i = 1; i < 16; i++) {
                __be16 *eaddrs = (__be16 *)dev->dev_addr;
                void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
        }

        /* Initialize other registers. */
        /* Configure the PCI bus bursts and FIFO thresholds. */
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
        udelay(1000);
        writel(np->tx_mode, ioaddr + TxMode);
        np->tx_threshold = 4;
        writel(np->tx_threshold, ioaddr + TxThreshold);

        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

        napi_enable(&np->napi);

        netif_start_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
        set_rx_mode(dev);

        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
        check_duplex(dev);

        /* Enable GPIO interrupts on link change */
        writel(0x0f00ff00, ioaddr + GPIOCtrl);

        /* Set the interrupt mask */
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
               ioaddr + IntrEnable);
        /* Enable PCI interrupts. */
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
               ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
        /* Set VLAN type to 802.1q */
        writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

        retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_RX);
                goto out_init;
        }
        if (fw_rx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_rx->size, FIRMWARE_RX);
                retval = -EINVAL;
                goto out_rx;
        }
        retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
        if (retval) {
                printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
                       FIRMWARE_TX);
                goto out_rx;
        }
        if (fw_tx->size % 4) {
                printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
                       fw_tx->size, FIRMWARE_TX);
                retval = -EINVAL;
                goto out_tx;
        }
        fw_rx_data = (const __be32 *)&fw_rx->data[0];
        fw_tx_data = (const __be32 *)&fw_tx->data[0];
        rx_size = fw_rx->size / 4;
        tx_size = fw_tx->size / 4;

        /* Load Rx/Tx firmware into the frame processors */
        for (i = 0; i < rx_size; i++)
                writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
        for (i = 0; i < tx_size; i++)
                writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
        if (enable_hw_cksum)
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
        else
                /* Enable the Rx and Tx units only. */
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
                       dev->name);

out_tx:
        release_firmware(fw_tx);
out_rx:
        release_firmware(fw_rx);
out_init:
        if (retval)
                netdev_close(dev);
        return retval;
}


static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u16 reg0;
        int silly_count = 1000;

        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
        udelay(500);
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
                /* do nothing */;
        if (!silly_count) {
                printk("%s: MII reset failed!\n", dev->name);
                return;
        }

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

        if (!np->mii_if.force_media) {
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
        } else {
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
                if (np->speed100)
                        reg0 |= BMCR_SPEED100;
                if (np->mii_if.full_duplex)
                        reg0 |= BMCR_FULLDPLX;
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");
        }
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int old_debug;

        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

        /* Perhaps we should reinitialize the hardware here. */

        /*
         * Stop and restart the interface.
         * Cheat and increase the debug level temporarily.
         */
        old_debug = debug;
        debug = 2;
        netdev_close(dev);
        netdev_open(dev);
        debug = old_debug;

        /* Trigger an immediate transmit demand. */

        netif_trans_update(dev); /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = np->reap_tx = 0;
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
                                                        skb->data,
                                                        np->rx_buf_sz,
                                                        DMA_FROM_DEVICE);
                if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
                        dev_kfree_skb(skb);
                        np->rx_info[i].skb = NULL;
                        break;
                }
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
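        /* Publish the index of the last valid Rx descriptor.  If any
           allocation failed, dirty_rx wraps below zero as an unsigned, so
           the refill logic still sees RX_RING_SIZE - i buffers outstanding. */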
        writew(i - 1, np->base + RxDescQIdx);
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Clear the remainder of the Rx buffer ring. */
        for (  ; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = 0;
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

        /* Clear the completion rings. */
        for (i = 0; i < DONE_Q_SIZE; i++) {
                np->rx_done_q[i].status = 0;
                np->tx_done_q[i].status = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++)
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        unsigned int prev_tx;
        u32 status;
        int i, j;

        /*
         * be cautious here, wrapping the queue has weird semantics
         * and we may not have enough slots even when it seems we do.
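         * Hence the conservative factor-of-two headroom in the check below.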
         */
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
                        return NETDEV_TX_OK;
        }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

        prev_tx = np->cur_tx;
        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
                status = TxDescID;

                if (i == 0) {
                        np->tx_info[entry].skb = skb;
                        status |= TxCRCEn;
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
                                status |= TxRingWrap;
                                wrap_ring = 1;
                        }
                        if (np->reap_tx) {
                                status |= TxDescIntr;
                                np->reap_tx = 0;
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                status |= TxCalTCP;
                                dev->stats.tx_compressed++;
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                dma_map_single(&np->pci_dev->dev, skb->data,
                                               skb_first_frag_len(skb),
                                               DMA_TO_DEVICE);
                } else {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= skb_frag_size(this_frag);
                        np->tx_info[entry].mapping =
                                dma_map_single(&np->pci_dev->dev,
                                               skb_frag_address(this_frag),
                                               skb_frag_size(this_frag),
                                               DMA_TO_DEVICE);
                }
                if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
                        dev->stats.tx_dropped++;
                        goto err_out;
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* scavenge the tx descriptors twice per TX_RING_SIZE */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();

        /* Update the producer index. */
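        /* The hardware index appears to count 8-byte units:
           sizeof(starfire_tx_desc) is 8 for type 1 descriptors and 16 for
           type 2, hence the division by 8. */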
        writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        return NETDEV_TX_OK;

err_out:
        entry = prev_tx % TX_RING_SIZE;
        np->tx_info[entry].skb = NULL;
        if (i > 0) {
                dma_unmap_single(&np->pci_dev->dev,
                                 np->tx_info[entry].mapping,
                                 skb_first_frag_len(skb), DMA_TO_DEVICE);
                np->tx_info[entry].mapping = 0;
                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
                for (j = 1; j < i; j++) {
                        dma_unmap_single(&np->pci_dev->dev,
                                         np->tx_info[entry].mapping,
                                         skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
                                         DMA_TO_DEVICE);
                        entry++;
                }
        }
        dev_kfree_skb_any(skb);
        np->cur_tx = prev_tx;
        return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;
        int handled = 0;

        do {
                u32 intr_status = readl(ioaddr + IntrClear);

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
                               dev->name, intr_status);

                if (intr_status == 0 || intr_status == (u32) -1)
                        break;

                handled = 1;

                if (intr_status & (IntrRxDone | IntrRxEmpty)) {
                        u32 enable;

                        if (likely(napi_schedule_prep(&np->napi))) {
                                __napi_schedule(&np->napi);
                                enable = readl(ioaddr + IntrEnable);
                                enable &= ~(IntrRxDone | IntrRxEmpty);
                                writel(enable, ioaddr + IntrEnable);
                                /* flush PCI posting buffers */
                                readl(ioaddr + IntrEnable);
                        } else {
                                /* Paranoia check */
                                enable = readl(ioaddr + IntrEnable);
                                if (enable & (IntrRxDone | IntrRxEmpty)) {
                                        printk(KERN_INFO
                                               "%s: interrupt while in poll!\n",
                                               dev->name);
                                        enable &= ~(IntrRxDone | IntrRxEmpty);
                                        writel(enable, ioaddr + IntrEnable);
                                }
                        }
                }

                /* Scavenge the skbuff list based on the Tx-done queue.
                   There are redundant checks here that may be cleaned up
                   after the driver has proven to be reliable. */
                consumer = readl(ioaddr + TxConsumerIdx);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                               dev->name, consumer);

                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                        if (debug > 3)
                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
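                        /* The top three status bits select the completion
                           type: 101 appears to be a plain "packet sent"
                           entry, while 100 carries the descriptor index and
                           lets us unmap and free the skb. */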
1356                        if ((tx_status & 0xe0000000) == 0xa0000000) {
1357                                dev->stats.tx_packets++;
1358                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
1359                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1360                                struct sk_buff *skb = np->tx_info[entry].skb;
1361                                np->tx_info[entry].skb = NULL;
1362                                dma_unmap_single(&np->pci_dev->dev,
1363                                                 np->tx_info[entry].mapping,
1364                                                 skb_first_frag_len(skb),
1365                                                 DMA_TO_DEVICE);
1366                                np->tx_info[entry].mapping = 0;
1367                                np->dirty_tx += np->tx_info[entry].used_slots;
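                                    /* used_slots covers the slot(s) holding
                                       the packet's linear part; fragment
                                       descriptors follow it in the ring and
                                       are stepped over below. */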
1368                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1369                                {
1370                                        int i;
1371                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1372                                                dma_unmap_single(&np->pci_dev->dev,
1373                                                                 np->tx_info[entry].mapping,
1374                                                                 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1375                                                                 DMA_TO_DEVICE);
1376                                                np->dirty_tx++;
1377                                                entry++;
1378                                        }
1379                                }
1380
1381                                dev_consume_skb_irq(skb);
1382                        }
1383                        np->tx_done_q[np->tx_done].status = 0;
1384                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1385                }
1386                writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1387
1388                if (netif_queue_stopped(dev) &&
1389                    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1390                        /* The ring is no longer full, wake the queue. */
1391                        netif_wake_queue(dev);
1392                }
1393
1394                /* Stats overflow */
1395                if (intr_status & IntrStatsMax)
1396                        get_stats(dev);
1397
1398                /* Media change interrupt. */
1399                if (intr_status & IntrLinkChange)
1400                        netdev_media_change(dev);
1401
1402                /* Abnormal error summary/uncommon events handlers. */
1403                if (intr_status & IntrAbnormalSummary)
1404                        netdev_error(dev, intr_status);
1405
1406                if (--boguscnt < 0) {
1407                        if (debug > 1)
1408                                printk(KERN_WARNING "%s: Too much work at interrupt, "
1409                                       "status=%#8.8x.\n",
1410                                       dev->name, intr_status);
1411                        break;
1412                }
1413        } while (1);
1414
1415        if (debug > 4)
1416                printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1417                       dev->name, (int) readl(ioaddr + IntrStatus));
1418        return IRQ_RETVAL(handled);
1419}
1420
1421
1422/*
1423 * This routine is logically part of the interrupt/poll handler, but separated
1424 * for clarity and better register allocation.
1425 */
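    /* Each Rx completion word packs the packet length into its low 16 bits
       and the Rx ring index of the receiving buffer into bits 16-26; both
       are unpacked from desc_status below. */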
1426static int __netdev_rx(struct net_device *dev, int *quota)
1427{
1428        struct netdev_private *np = netdev_priv(dev);
1429        u32 desc_status;
1430        int retcode = 0;
1431
1432        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1433        while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1434                struct sk_buff *skb;
1435                u16 pkt_len;
1436                int entry;
1437                rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1438
1439                if (debug > 4)
1440                        printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1441                if (!(desc_status & RxOK)) {
1442                        /* There was an error. */
1443                        if (debug > 2)
1444                                printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1445                        dev->stats.rx_errors++;
1446                        if (desc_status & RxFIFOErr)
1447                                dev->stats.rx_fifo_errors++;
1448                        goto next_rx;
1449                }
1450
1451                if (*quota <= 0) {      /* out of rx quota */
1452                        retcode = 1;
1453                        goto out;
1454                }
1455                (*quota)--;
1456
1457                pkt_len = desc_status;  /* implicitly truncates to the low 16 bits */
1458                entry = (desc_status >> 16) & 0x7ff;
1459
1460                if (debug > 4)
1461                        printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1462                /* Check if the packet is long enough to accept without copying
1463                   to a minimally-sized skbuff. */
1464                if (pkt_len < rx_copybreak &&
1465                    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1466                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
1467                        dma_sync_single_for_cpu(&np->pci_dev->dev,
1468                                                np->rx_info[entry].mapping,
1469                                                pkt_len, DMA_FROM_DEVICE);
1470                        skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1471                        dma_sync_single_for_device(&np->pci_dev->dev,
1472                                                   np->rx_info[entry].mapping,
1473                                                   pkt_len, DMA_FROM_DEVICE);
1474                        skb_put(skb, pkt_len);
1475                } else {
1476                        dma_unmap_single(&np->pci_dev->dev,
1477                                         np->rx_info[entry].mapping,
1478                                         np->rx_buf_sz, DMA_FROM_DEVICE);
1479                        skb = np->rx_info[entry].skb;
1480                        skb_put(skb, pkt_len);
1481                        np->rx_info[entry].skb = NULL;
1482                        np->rx_info[entry].mapping = 0;
1483                }
1484#ifndef final_version                   /* Remove after testing. */
1485                /* You will want this info for the initial debug. */
1486                if (debug > 5) {
1487                        printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
1488                               skb->data, skb->data + 6,
1489                               skb->data[12], skb->data[13]);
1490                }
1491#endif
1492
1493                skb->protocol = eth_type_trans(skb, dev);
1494#ifdef VLAN_SUPPORT
1495                if (debug > 4)
1496                        printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1497#endif
1498                if (le16_to_cpu(desc->status2) & 0x0100) {
1499                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1500                        dev->stats.rx_compressed++;
1501                }
1502                /*
1503                 * This feature doesn't seem to be working, at least
1504                 * with the two firmware versions I have. If the GFP sees
1505                 * an IP fragment, it either ignores it completely, or reports
1506                 * "bad checksum" on it.
1507                 *
1508                 * Maybe I missed something -- corrections are welcome.
1509                 * Until then, the printk stays. :-) -Ion
1510                 */
1511                else if (le16_to_cpu(desc->status2) & 0x0040) {
1512                        skb->ip_summed = CHECKSUM_COMPLETE;
1513                        skb->csum = le16_to_cpu(desc->csum);
1514                        printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1515                }
1516#ifdef VLAN_SUPPORT
1517                if (le16_to_cpu(desc->status2) & 0x0200) {
1518                        u16 vlid = le16_to_cpu(desc->vlanid);
1519
1520                        if (debug > 4) {
1521                                printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
1522                                       vlid);
1523                        }
1524                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1525                }
1526#endif /* VLAN_SUPPORT */
1527                netif_receive_skb(skb);
1528                dev->stats.rx_packets++;
1529
1530        next_rx:
1531                np->cur_rx++;
1532                desc->status = 0;
1533                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1534        }
1535
1536        if (*quota == 0) {      /* out of rx quota */
1537                retcode = 1;
1538                goto out;
1539        }
1540        writew(np->rx_done, np->base + CompletionQConsumerIdx);
1541
1542 out:
1543        refill_rx_ring(dev);
1544        if (debug > 5)
1545                printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1546                       retcode, np->rx_done, desc_status);
1547        return retcode;
1548}
1549
1550static int netdev_poll(struct napi_struct *napi, int budget)
1551{
1552        struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1553        struct net_device *dev = np->dev;
1554        u32 intr_status;
1555        void __iomem *ioaddr = np->base;
1556        int quota = budget;
1557
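            /* NAPI contract: consume at most 'budget' packets and return the
               number consumed; only when we stop short of the budget may we
               complete the poll and unmask the Rx interrupt sources. */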
1558        do {
1559                writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1560
1561                if (__netdev_rx(dev, &quota))
1562                        goto out;
1563
1564                intr_status = readl(ioaddr + IntrStatus);
1565        } while (intr_status & (IntrRxDone | IntrRxEmpty));
1566
1567        napi_complete(napi);
1568        intr_status = readl(ioaddr + IntrEnable);
1569        intr_status |= IntrRxDone | IntrRxEmpty;
1570        writel(intr_status, ioaddr + IntrEnable);
1571
1572 out:
1573        if (debug > 5)
1574                printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1575                       budget - quota);
1576
1577        /* Restart Rx engine if stopped. */
1578        return budget - quota;
1579}
1580
1581static void refill_rx_ring(struct net_device *dev)
1582{
1583        struct netdev_private *np = netdev_priv(dev);
1584        struct sk_buff *skb;
1585        int entry = -1;
1586
1587        /* Refill the Rx ring buffers. */
1588        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1589                entry = np->dirty_rx % RX_RING_SIZE;
1590                if (np->rx_info[entry].skb == NULL) {
1591                        skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1592                        np->rx_info[entry].skb = skb;
1593                        if (skb == NULL)
1594                                break;  /* Better luck next round. */
1595                        np->rx_info[entry].mapping =
1596                                dma_map_single(&np->pci_dev->dev, skb->data,
1597                                               np->rx_buf_sz, DMA_FROM_DEVICE);
1598                        if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1599                                dev_kfree_skb(skb);
1600                                np->rx_info[entry].skb = NULL;
1601                                break;
1602                        }
1603                        np->rx_ring[entry].rxaddr =
1604                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1605                }
1606                if (entry == RX_RING_SIZE - 1)
1607                        np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1608        }
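            /* Post the index of the last descriptor made available; if no
               buffer could be (re)queued, leave the producer index alone. */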
1609        if (entry >= 0)
1610                writew(entry, np->base + RxDescQIdx);
1611}
1612
1613
1614static void netdev_media_change(struct net_device *dev)
1615{
1616        struct netdev_private *np = netdev_priv(dev);
1617        void __iomem *ioaddr = np->base;
1618        u16 reg0, reg1, reg4, reg5;
1619        u32 new_tx_mode;
1620        u32 new_intr_timer_ctrl;
1621
1622        /* the BMSR latches link-state changes; read once to clear stale status */
1623        mdio_read(dev, np->phys[0], MII_BMCR);
1624        mdio_read(dev, np->phys[0], MII_BMSR);
1625
1626        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1627        reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1628
1629        if (reg1 & BMSR_LSTATUS) {
1630                /* link is up */
1631                if (reg0 & BMCR_ANENABLE) {
1632                        /* autonegotiation is enabled */
1633                        reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1634                        reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1635                        if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1636                                np->speed100 = 1;
1637                                np->mii_if.full_duplex = 1;
1638                        } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1639                                np->speed100 = 1;
1640                                np->mii_if.full_duplex = 0;
1641                        } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1642                                np->speed100 = 0;
1643                                np->mii_if.full_duplex = 1;
1644                        } else {
1645                                np->speed100 = 0;
1646                                np->mii_if.full_duplex = 0;
1647                        }
1648                } else {
1649                        /* autonegotiation is disabled */
1650                        if (reg0 & BMCR_SPEED100)
1651                                np->speed100 = 1;
1652                        else
1653                                np->speed100 = 0;
1654                        if (reg0 & BMCR_FULLDPLX)
1655                                np->mii_if.full_duplex = 1;
1656                        else
1657                                np->mii_if.full_duplex = 0;
1658                }
1659                netif_carrier_on(dev);
1660                printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1661                       dev->name,
1662                       np->speed100 ? "100" : "10",
1663                       np->mii_if.full_duplex ? "full" : "half");
1664
1665                new_tx_mode = np->tx_mode & ~FullDuplex;        /* duplex setting */
1666                if (np->mii_if.full_duplex)
1667                        new_tx_mode |= FullDuplex;
1668                if (np->tx_mode != new_tx_mode) {
1669                        np->tx_mode = new_tx_mode;
1670                        writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1671                        udelay(1000);
1672                        writel(np->tx_mode, ioaddr + TxMode);
1673                }
1674
1675                new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1676                if (np->speed100)
1677                        new_intr_timer_ctrl |= Timer10X;
1678                if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1679                        np->intr_timer_ctrl = new_intr_timer_ctrl;
1680                        writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1681                }
1682        } else {
1683                netif_carrier_off(dev);
1684                printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1685        }
1686}
1687
1688
1689static void netdev_error(struct net_device *dev, int intr_status)
1690{
1691        struct netdev_private *np = netdev_priv(dev);
1692
1693        /* Came close to underrunning the Tx FIFO: raise the threshold, in units of 16 bytes. */
1694        if (intr_status & IntrTxDataLow) {
1695                if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1696                        writel(++np->tx_threshold, np->base + TxThreshold);
1697                        printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1698                               dev->name, np->tx_threshold * 16);
1699                } else
1700                        printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1701        }
1702        if (intr_status & IntrRxGFPDead) {
1703                dev->stats.rx_fifo_errors++;
1704                dev->stats.rx_errors++;
1705        }
1706        if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1707                dev->stats.tx_fifo_errors++;
1708                dev->stats.tx_errors++;
1709        }
1710        if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary |
                                 IntrLinkChange | IntrStatsMax | IntrTxDataLow |
                                 IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) &&
                debug)
1711                printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1712                       dev->name, intr_status);
1713}
1714
1715
1716static struct net_device_stats *get_stats(struct net_device *dev)
1717{
1718        struct netdev_private *np = netdev_priv(dev);
1719        void __iomem *ioaddr = np->base;
1720
1721        /* This adapter architecture needs no SMP locks. */
1722        dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1723        dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1724        dev->stats.tx_packets = readl(ioaddr + 0x57000);
1725        dev->stats.tx_aborted_errors =
1726                readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1727        dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1728        dev->stats.collisions =
1729                readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1730
1731        /* The chip reports only frames it silently dropped; fold them in and clear the counter. */
1732        dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1733        writew(0, ioaddr + RxDMAStatus);
1734        dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1735        dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1736        dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1737        dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1738
1739        return &dev->stats;
1740}
1741
1742#ifdef VLAN_SUPPORT
1743static u32 set_vlan_mode(struct netdev_private *np)
1744{
1745        u32 ret = VlanMode;
1746        u16 vid;
1747        void __iomem *filter_addr = np->base + HashTable + 8;
1748        int vlan_count = 0;
1749
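            /* The VLAN filter entries live alongside the hash table: they
               start 8 bytes into that register block and are spaced 16
               bytes apart. */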
1750        for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1751                if (vlan_count == 32)
1752                        break;
1753                writew(vid, filter_addr);
1754                filter_addr += 16;
1755                vlan_count++;
1756        }
            /* If every active VLAN fit in the 32-entry table, use perfect
               VLAN filtering and zero out the unused slots; otherwise the
               table is too small and all VLAN IDs must be accepted. */
1757        if (vlan_count < 32) {
1758                ret |= PerfectFilterVlan;
1759                while (vlan_count < 32) {
1760                        writew(0, filter_addr);
1761                        filter_addr += 16;
1762                        vlan_count++;
1763                }
1764        }
1765        return ret;
1766}
1767#endif /* VLAN_SUPPORT */
1768
1769static void set_rx_mode(struct net_device *dev)
1770{
1771        struct netdev_private *np = netdev_priv(dev);
1772        void __iomem *ioaddr = np->base;
1773        u32 rx_mode = MinVLANPrio;
1774        struct netdev_hw_addr *ha;
1775        int i;
1776
1777#ifdef VLAN_SUPPORT
1778        rx_mode |= set_vlan_mode(np);
1779#endif /* VLAN_SUPPORT */
1780
1781        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1782                rx_mode |= AcceptAll;
1783        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1784                   (dev->flags & IFF_ALLMULTI)) {
1785                /* Too many to match, or accept all multicasts. */
1786                rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1787        } else if (netdev_mc_count(dev) <= 14) {
1788                /* Use the 16 element perfect filter, skip first two entries. */
1789                void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1790                __be16 *eaddrs;
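                    /* The perfect-filter registers appear to want the three
                       16-bit words of the address in reverse order (the last
                       two octets go to the lowest register), hence the
                       reversed indices below. */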
1791                netdev_for_each_mc_addr(ha, dev) {
1792                        eaddrs = (__be16 *) ha->addr;
1793                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1794                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1795                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1796                }
1797                eaddrs = (__be16 *)dev->dev_addr;
1798                i = netdev_mc_count(dev) + 2;
1799                while (i++ < 16) {
1800                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1801                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1802                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1803                }
1804                rx_mode |= AcceptBroadcast|PerfectFilter;
1805        } else {
1806                /* Must use a multicast hash table. */
1807                void __iomem *filter_addr;
1808                __be16 *eaddrs;
1809                __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));   /* Multicast hash filter */
1810
1811                memset(mc_filter, 0, sizeof(mc_filter));
1812                netdev_for_each_mc_addr(ha, dev) {
1813                        /* The chip uses the upper 9 CRC bits
1814                           as index into the hash table */
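                            /* bit_nr is thus 0..511; the 512-bit table is
                               kept here as 32 16-bit words, so
                               (bit_nr >> 4) & ~1 selects an aligned pair of
                               words and (bit_nr & 31) the bit within that
                               pair. */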
1815                        int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1816                        __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1817
1818                        *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1819                }
1820                /* Clear the perfect filter list, skip first two entries. */
1821                filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1822                eaddrs = (__be16 *)dev->dev_addr;
1823                for (i = 2; i < 16; i++) {
1824                        writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1825                        writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1826                        writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1827                }
1828                for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1829                        writew(mc_filter[i], filter_addr);
1830                rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1831        }
1832        writel(rx_mode, ioaddr + RxFilterMode);
1833}
1834
1835static int check_if_running(struct net_device *dev)
1836{
1837        if (!netif_running(dev))
1838                return -EINVAL;
1839        return 0;
1840}
1841
1842static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1843{
1844        struct netdev_private *np = netdev_priv(dev);
1845        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1846        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1847}
1848
1849static int get_link_ksettings(struct net_device *dev,
1850                              struct ethtool_link_ksettings *cmd)
1851{
1852        struct netdev_private *np = netdev_priv(dev);
1853        spin_lock_irq(&np->lock);
1854        mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1855        spin_unlock_irq(&np->lock);
1856        return 0;
1857}
1858
1859static int set_link_ksettings(struct net_device *dev,
1860                              const struct ethtool_link_ksettings *cmd)
1861{
1862        struct netdev_private *np = netdev_priv(dev);
1863        int res;
1864        spin_lock_irq(&np->lock);
1865        res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1866        spin_unlock_irq(&np->lock);
1867        check_duplex(dev);
1868        return res;
1869}
1870
1871static int nway_reset(struct net_device *dev)
1872{
1873        struct netdev_private *np = netdev_priv(dev);
1874        return mii_nway_restart(&np->mii_if);
1875}
1876
1877static u32 get_link(struct net_device *dev)
1878{
1879        struct netdev_private *np = netdev_priv(dev);
1880        return mii_link_ok(&np->mii_if);
1881}
1882
1883static u32 get_msglevel(struct net_device *dev)
1884{
1885        return debug;
1886}
1887
1888static void set_msglevel(struct net_device *dev, u32 val)
1889{
1890        debug = val;
1891}
1892
1893static const struct ethtool_ops ethtool_ops = {
1894        .begin = check_if_running,
1895        .get_drvinfo = get_drvinfo,
1896        .nway_reset = nway_reset,
1897        .get_link = get_link,
1898        .get_msglevel = get_msglevel,
1899        .set_msglevel = set_msglevel,
1900        .get_link_ksettings = get_link_ksettings,
1901        .set_link_ksettings = set_link_ksettings,
1902};
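    /* These hooks back the usual ethtool commands; roughly (interface name
     * illustrative):
     *   ethtool eth0          -> get_link_ksettings
     *   ethtool -s eth0 ...   -> set_link_ksettings, then check_duplex
     *   ethtool -r eth0       -> nway_reset
     */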
1903
1904static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1905{
1906        struct netdev_private *np = netdev_priv(dev);
1907        struct mii_ioctl_data *data = if_mii(rq);
1908        int rc;
1909
1910        if (!netif_running(dev))
1911                return -EINVAL;
1912
1913        spin_lock_irq(&np->lock);
1914        rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1915        spin_unlock_irq(&np->lock);
1916
1917        if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1918                check_duplex(dev);
1919
1920        return rc;
1921}
1922
1923static int netdev_close(struct net_device *dev)
1924{
1925        struct netdev_private *np = netdev_priv(dev);
1926        void __iomem *ioaddr = np->base;
1927        int i;
1928
1929        netif_stop_queue(dev);
1930
1931        napi_disable(&np->napi);
1932
1933        if (debug > 1) {
1934                printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1935                           dev->name, (int) readl(ioaddr + IntrStatus));
1936                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1937                       dev->name, np->cur_tx, np->dirty_tx,
1938                       np->cur_rx, np->dirty_rx);
1939        }
1940
1941        /* Disable interrupts by clearing the interrupt mask. */
1942        writel(0, ioaddr + IntrEnable);
1943
1944        /* Stop the chip's Tx and Rx processes. */
1945        writel(0, ioaddr + GenCtrl);
1946        readl(ioaddr + GenCtrl);
1947
1948        if (debug > 5) {
1949                printk(KERN_DEBUG "  Tx ring at %#llx:\n",
1950                       (long long) np->tx_ring_dma);
1951                for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1952                        printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1953                               i, le32_to_cpu(np->tx_ring[i].status),
1954                               (long long) dma_to_cpu(np->tx_ring[i].addr),
1955                               le32_to_cpu(np->tx_done_q[i].status));
1956                printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1957                       (long long) np->rx_ring_dma, np->rx_done_q);
1958                if (np->rx_done_q)
1959                        for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1960                                printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1961                                       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1962                        }
1963        }
1964
1965        free_irq(np->pci_dev->irq, dev);
1966
1967        /* Free all the skbuffs in the Rx queue. */
1968        for (i = 0; i < RX_RING_SIZE; i++) {
1969                np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1970                if (np->rx_info[i].skb != NULL) {
1971                        dma_unmap_single(&np->pci_dev->dev,
1972                                         np->rx_info[i].mapping,
1973                                         np->rx_buf_sz, DMA_FROM_DEVICE);
1974                        dev_kfree_skb(np->rx_info[i].skb);
1975                }
1976                np->rx_info[i].skb = NULL;
1977                np->rx_info[i].mapping = 0;
1978        }
1979        for (i = 0; i < TX_RING_SIZE; i++) {
1980                struct sk_buff *skb = np->tx_info[i].skb;
1981                if (skb == NULL)
1982                        continue;
1983                dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
1984                                 skb_first_frag_len(skb), DMA_TO_DEVICE);
1985                np->tx_info[i].mapping = 0;
1986                dev_kfree_skb(skb);
1987                np->tx_info[i].skb = NULL;
1988        }
1989
1990        return 0;
1991}
1992
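    /* Power management: the simplest safe policy is to close the interface
       completely on suspend, quiescing all DMA, and to run the normal open
       path again on resume. */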
1993static int __maybe_unused starfire_suspend(struct device *dev_d)
1994{
1995        struct net_device *dev = dev_get_drvdata(dev_d);
1996
1997        if (netif_running(dev)) {
1998                netif_device_detach(dev);
1999                netdev_close(dev);
2000        }
2001
2002        return 0;
2003}
2004
2005static int __maybe_unused starfire_resume(struct device *dev_d)
2006{
2007        struct net_device *dev = dev_get_drvdata(dev_d);
2008
2009        if (netif_running(dev)) {
2010                netdev_open(dev);
2011                netif_device_attach(dev);
2012        }
2013
2014        return 0;
2015}
2016
2017static void starfire_remove_one(struct pci_dev *pdev)
2018{
2019        struct net_device *dev = pci_get_drvdata(pdev);
2020        struct netdev_private *np = netdev_priv(dev);
2021
2022        BUG_ON(!dev);
2023
2024        unregister_netdev(dev);
2025
2026        if (np->queue_mem)
2027                dma_free_coherent(&pdev->dev, np->queue_mem_size,
2028                                  np->queue_mem, np->queue_mem_dma);
2029
2030
2031        /* XXX: add wakeup code -- requires firmware for MagicPacket */
2032        pci_set_power_state(pdev, PCI_D3hot);   /* go to sleep in D3 mode */
2033        pci_disable_device(pdev);
2034
2035        iounmap(np->base);
2036        pci_release_regions(pdev);
2037
2038        free_netdev(dev);                       /* Will also free np!! */
2039}
2040
2041static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
2042
2043static struct pci_driver starfire_driver = {
2044        .name           = DRV_NAME,
2045        .probe          = starfire_init_one,
2046        .remove         = starfire_remove_one,
2047        .driver.pm      = &starfire_pm_ops,
2048        .id_table       = starfire_pci_tbl,
2049};
2050
2051
2052static int __init starfire_init(void)
2053{
2054/* When built as a module, this is printed whether or not probe finds any devices. */
2055#ifdef MODULE
2056        printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2057#endif
2058
2059        BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2060
2061        return pci_register_driver(&starfire_driver);
2062}
2063
2064
2065static void __exit starfire_cleanup(void)
2066{
2067        pci_unregister_driver(&starfire_driver);
2068}
2069
2070
2071module_init(starfire_init);
2072module_exit(starfire_cleanup);
2073