linux/drivers/net/typhoon.c
   1/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
   2/*
   3        Written 2002-2004 by David Dillow <dave@thedillows.org>
   4        Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
   5        Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
   6
   7        This software may be used and distributed according to the terms of
   8        the GNU General Public License (GPL), incorporated herein by reference.
   9        Drivers based on or derived from this code fall under the GPL and must
  10        retain the authorship, copyright and license notice.  This file is not
  11        a complete program and may only be used when the entire operating
  12        system is licensed under the GPL.
  13
  14        This software is available on a public web site. It may enable
  15        cryptographic capabilities of the 3Com hardware, and may be
  16        exported from the United States under License Exception "TSU"
  17        pursuant to 15 C.F.R. Section 740.13(e).
  18
  19        This work was funded by the National Library of Medicine under
  20        the Department of Energy project number 0274DD06D1 and NLM project
  21        number Y1-LM-2015-01.
  22
  23        This driver is designed for the 3Com 3CR990 Family of cards with the
  24        3XP Processor. It has been tested on x86 and sparc64.
  25
  26        KNOWN ISSUES:
  27        *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
  28                issue. Hopefully 3Com will fix it.
   29        *) Waiting for a command response takes 8ms due to non-preemptible
   30                polling. Only significant for getting stats and creating
   31                SAs, but an ugly wart nevertheless.
  32
  33        TODO:
  34        *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
  35        *) Add more support for ethtool (especially for NIC stats)
  36        *) Allow disabling of RX checksum offloading
  37        *) Fix MAC changing to work while the interface is up
  38                (Need to put commands on the TX ring, which changes
  39                the locking)
  40        *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
  41                http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
  42*/
  43
  44/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  45 * Setting to > 1518 effectively disables this feature.
  46 */
  47static int rx_copybreak = 200;
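
/* A minimal sketch of the copybreak idea, using hypothetical locals --
 * the driver's real receive path later in this file also handles DMA
 * syncing, alignment, and handing the original buffer back to the 3XP:
 *
 *	if (pkt_len < rx_copybreak) {
 *		struct sk_buff *copy = netdev_alloc_skb(dev, pkt_len + 2);
 *		if (copy) {
 *			skb_reserve(copy, 2);
 *			skb_copy_to_linear_data(copy, skb->data, pkt_len);
 *			// pass 'copy' up the stack, recycle 'skb' to the NIC
 *		}
 *	}
 */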
  48
  49/* Should we use MMIO or Port IO?
  50 * 0: Port IO
  51 * 1: MMIO
  52 * 2: Try MMIO, fallback to Port IO
  53 */
  54static unsigned int use_mmio = 2;
  55
  56/* end user-configurable values */
  57
  58/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  59 */
  60static const int multicast_filter_limit = 32;
  61
  62/* Operational parameters that are set at compile time. */
  63
  64/* Keep the ring sizes a power of two for compile efficiency.
  65 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  66 * Making the Tx ring too large decreases the effectiveness of channel
  67 * bonding and packet priority.
  68 * There are no ill effects from too-large receive rings.
  69 *
   70 * We don't currently use the Hi Tx ring, so don't make it very big.
  71 *
  72 * Beware that if we start using the Hi Tx ring, we will need to change
  73 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
  74 */
  75#define TXHI_ENTRIES            2
  76#define TXLO_ENTRIES            128
  77#define RX_ENTRIES              32
  78#define COMMAND_ENTRIES         16
  79#define RESPONSE_ENTRIES        32
  80
  81#define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
  82#define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
  83
  84/* The 3XP will preload and remove 64 entries from the free buffer
  85 * list, and we need one entry to keep the ring from wrapping, so
  86 * to keep this a power of two, we use 128 entries.
  87 */
  88#define RXFREE_ENTRIES          128
  89#define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
  90
  91/* Operational parameters that usually are not changed. */
  92
  93/* Time in jiffies before concluding the transmitter is hung. */
  94#define TX_TIMEOUT  (2*HZ)
  95
  96#define PKT_BUF_SZ              1536
  97#define FIRMWARE_NAME           "3com/typhoon.bin"
  98
  99#define pr_fmt(fmt)             KBUILD_MODNAME " " fmt
 100
 101#include <linux/module.h>
 102#include <linux/kernel.h>
 103#include <linux/sched.h>
 104#include <linux/string.h>
 105#include <linux/timer.h>
 106#include <linux/errno.h>
 107#include <linux/ioport.h>
 108#include <linux/interrupt.h>
 109#include <linux/pci.h>
 110#include <linux/netdevice.h>
 111#include <linux/etherdevice.h>
 112#include <linux/skbuff.h>
 113#include <linux/mm.h>
 114#include <linux/init.h>
 115#include <linux/delay.h>
 116#include <linux/ethtool.h>
 117#include <linux/if_vlan.h>
 118#include <linux/crc32.h>
 119#include <linux/bitops.h>
 120#include <asm/processor.h>
 121#include <asm/io.h>
 122#include <asm/uaccess.h>
 123#include <linux/in6.h>
 124#include <linux/dma-mapping.h>
 125#include <linux/firmware.h>
 126
 127#include "typhoon.h"
 128
 129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
 130MODULE_VERSION("1.0");
 131MODULE_LICENSE("GPL");
 132MODULE_FIRMWARE(FIRMWARE_NAME);
 133MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
 134MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
 135                               "the buffer given back to the NIC. Default "
 136                               "is 200.");
  137MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO (0) to access the NIC. "
 138                           "Default is to try MMIO and fallback to PIO.");
 139module_param(rx_copybreak, int, 0);
 140module_param(use_mmio, int, 0);
 141
 142#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
 143#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
 144#undef NETIF_F_TSO
 145#endif
 146
 147#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
 148#error TX ring too small!
 149#endif
 150
 151struct typhoon_card_info {
 152        const char *name;
 153        const int capabilities;
 154};
 155
 156#define TYPHOON_CRYPTO_NONE             0x00
 157#define TYPHOON_CRYPTO_DES              0x01
 158#define TYPHOON_CRYPTO_3DES             0x02
 159#define TYPHOON_CRYPTO_VARIABLE         0x04
 160#define TYPHOON_FIBER                   0x08
 161#define TYPHOON_WAKEUP_NEEDS_RESET      0x10
 162
 163enum typhoon_cards {
 164        TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
 165        TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
 166        TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
 167        TYPHOON_FXM,
 168};
 169
 170/* directly indexed by enum typhoon_cards, above */
 171static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
 172        { "3Com Typhoon (3C990-TX)",
 173                TYPHOON_CRYPTO_NONE},
 174        { "3Com Typhoon (3CR990-TX-95)",
 175                TYPHOON_CRYPTO_DES},
 176        { "3Com Typhoon (3CR990-TX-97)",
 177                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
 178        { "3Com Typhoon (3C990SVR)",
 179                TYPHOON_CRYPTO_NONE},
 180        { "3Com Typhoon (3CR990SVR95)",
 181                TYPHOON_CRYPTO_DES},
 182        { "3Com Typhoon (3CR990SVR97)",
 183                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
 184        { "3Com Typhoon2 (3C990B-TX-M)",
 185                TYPHOON_CRYPTO_VARIABLE},
 186        { "3Com Typhoon2 (3C990BSVR)",
 187                TYPHOON_CRYPTO_VARIABLE},
 188        { "3Com Typhoon (3CR990-FX-95)",
 189                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
 190        { "3Com Typhoon (3CR990-FX-97)",
 191                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
 192        { "3Com Typhoon (3CR990-FX-95 Server)",
 193                TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
 194        { "3Com Typhoon (3CR990-FX-97 Server)",
 195                TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
 196        { "3Com Typhoon2 (3C990B-FX-97)",
 197                TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
 198};
 199
 200/* Notes on the new subsystem numbering scheme:
 201 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 202 * bit 4 indicates if this card has secured firmware (we don't support it)
 203 * bit 8 indicates if this is a (0) copper or (1) fiber card
 204 * bits 12-16 indicate card type: (0) client and (1) server
 205 */
 206static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
 207        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
  208          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
 209        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
 210          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
 211        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
 212          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
 213        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
 214          PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
 215        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
 216          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
 217        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
 218          PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
 219        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
 220          PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
 221        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
 222          PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
 223        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
 224          PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
 225        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
 226          PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
 227        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
 228          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
 229        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
 230          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
 231        { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
 232          PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
 233        { 0, }
 234};
 235MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
 236
 237/* Define the shared memory area
 238 * Align everything the 3XP will normally be using.
 239 * We'll need to move/align txHi if we start using that ring.
 240 */
 241#define __3xp_aligned   ____cacheline_aligned
 242struct typhoon_shared {
 243        struct typhoon_interface        iface;
 244        struct typhoon_indexes          indexes                 __3xp_aligned;
 245        struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
 246        struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
 247        struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
 248        struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
 249        struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
 250        struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
 251        u32                             zeroWord;
 252        struct tx_desc                  txHi[TXHI_ENTRIES];
 253} __packed;
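
/* typhoon_init_interface() below hands the 3XP the bus address of each
 * region above as tp->shared_dma + offsetof(struct typhoon_shared, member),
 * e.g. the command ring is advertised at shared_dma + shared_offset(cmd).
 */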
 254
 255struct rxbuff_ent {
 256        struct sk_buff *skb;
 257        dma_addr_t      dma_addr;
 258};
 259
 260struct typhoon {
 261        /* Tx cache line section */
 262        struct transmit_ring    txLoRing        ____cacheline_aligned;
 263        struct pci_dev *        tx_pdev;
 264        void __iomem            *tx_ioaddr;
 265        u32                     txlo_dma_addr;
 266
 267        /* Irq/Rx cache line section */
 268        void __iomem            *ioaddr         ____cacheline_aligned;
 269        struct typhoon_indexes *indexes;
 270        u8                      awaiting_resp;
 271        u8                      duplex;
 272        u8                      speed;
 273        u8                      card_state;
 274        struct basic_ring       rxLoRing;
 275        struct pci_dev *        pdev;
 276        struct net_device *     dev;
 277        struct napi_struct      napi;
 278        struct basic_ring       rxHiRing;
 279        struct basic_ring       rxBuffRing;
 280        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
 281
 282        /* general section */
 283        spinlock_t              command_lock    ____cacheline_aligned;
 284        struct basic_ring       cmdRing;
 285        struct basic_ring       respRing;
 286        struct net_device_stats stats;
 287        struct net_device_stats stats_saved;
 288        struct typhoon_shared * shared;
 289        dma_addr_t              shared_dma;
 290        __le16                  xcvr_select;
 291        __le16                  wol_events;
 292        __le32                  offload;
 293
 294        /* unused stuff (future use) */
 295        int                     capabilities;
 296        struct transmit_ring    txHiRing;
 297};
 298
 299enum completion_wait_values {
 300        NoWait = 0, WaitNoSleep, WaitSleep,
 301};
 302
 303/* These are the values for the typhoon.card_state variable.
 304 * These determine where the statistics will come from in get_stats().
 305 * The sleep image does not support the statistics we need.
 306 */
 307enum state_values {
 308        Sleeping = 0, Running,
 309};
 310
 311/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 312 * cannot pass a read, so this forces current writes to post.
 313 */
 314#define typhoon_post_pci_writes(x) \
 315        do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
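
/* Typical use, as in typhoon_reset() below: write the command or doorbell
 * register, call typhoon_post_pci_writes(ioaddr) to flush the posted MMIO
 * write out to the card, and only then start a delay or poll loop.
 */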
 316
 317/* We'll wait up to six seconds for a reset, and half a second normally.
 318 */
 319#define TYPHOON_UDELAY                  50
 320#define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
 321#define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
 322#define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
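
/* With TYPHOON_UDELAY at 50us per poll, that works out to:
 *	TYPHOON_RESET_TIMEOUT_NOSLEEP = 6,000,000us / 50us = 120,000 polls (~6s)
 *	TYPHOON_WAIT_TIMEOUT          =   500,000us / 50us =  10,000 polls (~0.5s)
 */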
 323
 324#if defined(NETIF_F_TSO)
 325#define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
 326#define TSO_NUM_DESCRIPTORS     2
 327#define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
 328#else
 329#define NETIF_F_TSO             0
 330#define skb_tso_size(x)         0
 331#define TSO_NUM_DESCRIPTORS     0
 332#define TSO_OFFLOAD_ON          0
 333#endif
 334
 335static inline void
 336typhoon_inc_index(u32 *index, const int count, const int num_entries)
 337{
  338        /* Increment a ring index -- we can use this for all rings except
  339         * the Rx rings, as they use different-size descriptors;
  340         * otherwise, everything is the same size as a cmd_desc
 341         */
 342        *index += count * sizeof(struct cmd_desc);
 343        *index %= num_entries * sizeof(struct cmd_desc);
 344}
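
/* Worked example, assuming a 16-byte cmd_desc: with COMMAND_ENTRIES = 16
 * the command ring is 256 bytes, so stepping one entry forward from byte
 * offset 240 gives (240 + 16) % 256 = 0 -- the index wraps to the start.
 * Since the ring size is a power of two, the compiler can turn the '%'
 * into a simple mask (index & 255).
 */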
 345
 346static inline void
 347typhoon_inc_cmd_index(u32 *index, const int count)
 348{
 349        typhoon_inc_index(index, count, COMMAND_ENTRIES);
 350}
 351
 352static inline void
 353typhoon_inc_resp_index(u32 *index, const int count)
 354{
 355        typhoon_inc_index(index, count, RESPONSE_ENTRIES);
 356}
 357
 358static inline void
 359typhoon_inc_rxfree_index(u32 *index, const int count)
 360{
 361        typhoon_inc_index(index, count, RXFREE_ENTRIES);
 362}
 363
 364static inline void
 365typhoon_inc_tx_index(u32 *index, const int count)
 366{
  367        /* if we start using the Hi Tx ring, this needs updating */
 368        typhoon_inc_index(index, count, TXLO_ENTRIES);
 369}
 370
 371static inline void
 372typhoon_inc_rx_index(u32 *index, const int count)
 373{
 374        /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
 375        *index += count * sizeof(struct rx_desc);
 376        *index %= RX_ENTRIES * sizeof(struct rx_desc);
 377}
 378
 379static int
 380typhoon_reset(void __iomem *ioaddr, int wait_type)
 381{
 382        int i, err = 0;
 383        int timeout;
 384
 385        if(wait_type == WaitNoSleep)
 386                timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
 387        else
 388                timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
 389
 390        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 391        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 392
 393        iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
 394        typhoon_post_pci_writes(ioaddr);
 395        udelay(1);
 396        iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
 397
 398        if(wait_type != NoWait) {
 399                for(i = 0; i < timeout; i++) {
 400                        if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
 401                           TYPHOON_STATUS_WAITING_FOR_HOST)
 402                                goto out;
 403
 404                        if(wait_type == WaitSleep)
 405                                schedule_timeout_uninterruptible(1);
 406                        else
 407                                udelay(TYPHOON_UDELAY);
 408                }
 409
 410                err = -ETIMEDOUT;
 411        }
 412
 413out:
 414        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
 415        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
 416
 417        /* The 3XP seems to need a little extra time to complete the load
 418         * of the sleep image before we can reliably boot it. Failure to
 419         * do this occasionally results in a hung adapter after boot in
 420         * typhoon_init_one() while trying to read the MAC address or
 421         * putting the card to sleep. 3Com's driver waits 5ms, but
 422         * that seems to be overkill. However, if we can sleep, we might
 423         * as well give it that much time. Otherwise, we'll give it 500us,
  424         * which should be enough (I've seen it work well at 100us, but still
 425         * saw occasional problems.)
 426         */
 427        if(wait_type == WaitSleep)
 428                msleep(5);
 429        else
 430                udelay(500);
 431        return err;
 432}
 433
 434static int
 435typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
 436{
 437        int i, err = 0;
 438
 439        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
 440                if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
 441                        goto out;
 442                udelay(TYPHOON_UDELAY);
 443        }
 444
 445        err = -ETIMEDOUT;
 446
 447out:
 448        return err;
 449}
 450
 451static inline void
 452typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
 453{
 454        if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
 455                netif_carrier_off(dev);
 456        else
 457                netif_carrier_on(dev);
 458}
 459
 460static inline void
 461typhoon_hello(struct typhoon *tp)
 462{
 463        struct basic_ring *ring = &tp->cmdRing;
 464        struct cmd_desc *cmd;
 465
 466        /* We only get a hello request if we've not sent anything to the
 467         * card in a long while. If the lock is held, then we're in the
 468         * process of issuing a command, so we don't need to respond.
 469         */
 470        if(spin_trylock(&tp->command_lock)) {
 471                cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
 472                typhoon_inc_cmd_index(&ring->lastWrite, 1);
 473
 474                INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
 475                wmb();
 476                iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
 477                spin_unlock(&tp->command_lock);
 478        }
 479}
 480
 481static int
 482typhoon_process_response(struct typhoon *tp, int resp_size,
 483                                struct resp_desc *resp_save)
 484{
 485        struct typhoon_indexes *indexes = tp->indexes;
 486        struct resp_desc *resp;
 487        u8 *base = tp->respRing.ringBase;
 488        int count, len, wrap_len;
 489        u32 cleared;
 490        u32 ready;
 491
 492        cleared = le32_to_cpu(indexes->respCleared);
 493        ready = le32_to_cpu(indexes->respReady);
 494        while(cleared != ready) {
 495                resp = (struct resp_desc *)(base + cleared);
 496                count = resp->numDesc + 1;
 497                if(resp_save && resp->seqNo) {
 498                        if(count > resp_size) {
 499                                resp_save->flags = TYPHOON_RESP_ERROR;
 500                                goto cleanup;
 501                        }
 502
 503                        wrap_len = 0;
 504                        len = count * sizeof(*resp);
 505                        if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
 506                                wrap_len = cleared + len - RESPONSE_RING_SIZE;
 507                                len = RESPONSE_RING_SIZE - cleared;
 508                        }
 509
 510                        memcpy(resp_save, resp, len);
 511                        if(unlikely(wrap_len)) {
 512                                resp_save += len / sizeof(*resp);
 513                                memcpy(resp_save, base, wrap_len);
 514                        }
 515
 516                        resp_save = NULL;
 517                } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
 518                        typhoon_media_status(tp->dev, resp);
 519                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
 520                        typhoon_hello(tp);
 521                } else {
 522                        netdev_err(tp->dev,
 523                                   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
 524                                   le16_to_cpu(resp->cmd),
 525                                   resp->numDesc, resp->flags,
 526                                   le16_to_cpu(resp->parm1),
 527                                   le32_to_cpu(resp->parm2),
 528                                   le32_to_cpu(resp->parm3));
 529                }
 530
 531cleanup:
 532                typhoon_inc_resp_index(&cleared, count);
 533        }
 534
 535        indexes->respCleared = cpu_to_le32(cleared);
 536        wmb();
 537        return resp_save == NULL;
 538}
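
/* The wrap handling above splits a response that straddles the end of the
 * ring into two copies. For example, assuming 16-byte resp_desc entries
 * (RESPONSE_RING_SIZE = 512): a two-descriptor response starting at offset
 * 496 copies 16 bytes from the tail of the ring and the remaining 16 bytes
 * (wrap_len) from the start of ringBase.
 */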
 539
 540static inline int
 541typhoon_num_free(int lastWrite, int lastRead, int ringSize)
 542{
 543        /* this works for all descriptors but rx_desc, as they are a
 544         * different size than the cmd_desc -- everyone else is the same
 545         */
 546        lastWrite /= sizeof(struct cmd_desc);
 547        lastRead /= sizeof(struct cmd_desc);
 548        return (ringSize + lastRead - lastWrite - 1) % ringSize;
 549}
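
/* Worked example, again assuming 16-byte descriptors: with lastWrite at
 * byte offset 64 and lastRead at 32 on the 16-entry command ring, the
 * indexes are 4 and 2, so (16 + 2 - 4 - 1) % 16 = 13 entries are free --
 * one slot is always kept empty so a full ring never looks identical to
 * an empty one.
 */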
 550
 551static inline int
 552typhoon_num_free_cmd(struct typhoon *tp)
 553{
 554        int lastWrite = tp->cmdRing.lastWrite;
 555        int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
 556
 557        return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
 558}
 559
 560static inline int
 561typhoon_num_free_resp(struct typhoon *tp)
 562{
 563        int respReady = le32_to_cpu(tp->indexes->respReady);
 564        int respCleared = le32_to_cpu(tp->indexes->respCleared);
 565
 566        return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
 567}
 568
 569static inline int
 570typhoon_num_free_tx(struct transmit_ring *ring)
 571{
 572        /* if we start using the Hi Tx ring, this needs updating */
 573        return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
 574}
 575
 576static int
 577typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
 578                      int num_resp, struct resp_desc *resp)
 579{
 580        struct typhoon_indexes *indexes = tp->indexes;
 581        struct basic_ring *ring = &tp->cmdRing;
 582        struct resp_desc local_resp;
 583        int i, err = 0;
 584        int got_resp;
 585        int freeCmd, freeResp;
 586        int len, wrap_len;
 587
 588        spin_lock(&tp->command_lock);
 589
 590        freeCmd = typhoon_num_free_cmd(tp);
 591        freeResp = typhoon_num_free_resp(tp);
 592
 593        if(freeCmd < num_cmd || freeResp < num_resp) {
 594                netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
 595                           freeCmd, num_cmd, freeResp, num_resp);
 596                err = -ENOMEM;
 597                goto out;
 598        }
 599
 600        if(cmd->flags & TYPHOON_CMD_RESPOND) {
 601                /* If we're expecting a response, but the caller hasn't given
 602                 * us a place to put it, we'll provide one.
 603                 */
 604                tp->awaiting_resp = 1;
 605                if(resp == NULL) {
 606                        resp = &local_resp;
 607                        num_resp = 1;
 608                }
 609        }
 610
 611        wrap_len = 0;
 612        len = num_cmd * sizeof(*cmd);
 613        if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
 614                wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
 615                len = COMMAND_RING_SIZE - ring->lastWrite;
 616        }
 617
 618        memcpy(ring->ringBase + ring->lastWrite, cmd, len);
 619        if(unlikely(wrap_len)) {
 620                struct cmd_desc *wrap_ptr = cmd;
 621                wrap_ptr += len / sizeof(*cmd);
 622                memcpy(ring->ringBase, wrap_ptr, wrap_len);
 623        }
 624
 625        typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
 626
 627        /* "I feel a presence... another warrior is on the mesa."
 628         */
 629        wmb();
 630        iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
 631        typhoon_post_pci_writes(tp->ioaddr);
 632
 633        if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
 634                goto out;
 635
  636        /* Ugh. We'll be here about 8ms, twiddling our thumbs, unable to
 637         * preempt or do anything other than take interrupts. So, don't
 638         * wait for a response unless you have to.
 639         *
 640         * I've thought about trying to sleep here, but we're called
 641         * from many contexts that don't allow that. Also, given the way
 642         * 3Com has implemented irq coalescing, we would likely timeout --
 643         * this has been observed in real life!
 644         *
 645         * The big killer is we have to wait to get stats from the card,
 646         * though we could go to a periodic refresh of those if we don't
 647         * mind them getting somewhat stale. The rest of the waiting
 648         * commands occur during open/close/suspend/resume, so they aren't
 649         * time critical. Creating SAs in the future will also have to
 650         * wait here.
 651         */
 652        got_resp = 0;
 653        for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
 654                if(indexes->respCleared != indexes->respReady)
 655                        got_resp = typhoon_process_response(tp, num_resp,
 656                                                                resp);
 657                udelay(TYPHOON_UDELAY);
 658        }
 659
 660        if(!got_resp) {
 661                err = -ETIMEDOUT;
 662                goto out;
 663        }
 664
 665        /* Collect the error response even if we don't care about the
 666         * rest of the response
 667         */
 668        if(resp->flags & TYPHOON_RESP_ERROR)
 669                err = -EIO;
 670
 671out:
 672        if(tp->awaiting_resp) {
 673                tp->awaiting_resp = 0;
 674                smp_wmb();
 675
 676                /* Ugh. If a response was added to the ring between
 677                 * the call to typhoon_process_response() and the clearing
 678                 * of tp->awaiting_resp, we could have missed the interrupt
 679                 * and it could hang in the ring an indeterminate amount of
 680                 * time. So, check for it, and interrupt ourselves if this
 681                 * is the case.
 682                 */
 683                if(indexes->respCleared != indexes->respReady)
 684                        iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
 685        }
 686
 687        spin_unlock(&tp->command_lock);
 688        return err;
 689}
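
/* A minimal usage sketch (see typhoon_do_get_stats() below for a real
 * caller): build the descriptor, fill in any parameters, then issue it.
 *
 *	struct cmd_desc xp_cmd;
 *	struct resp_desc xp_resp[1];
 *
 *	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MEDIA_STATUS);
 *	err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
 */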
 690
 691static inline void
 692typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
 693                        u32 ring_dma)
 694{
 695        struct tcpopt_desc *tcpd;
 696        u32 tcpd_offset = ring_dma;
 697
 698        tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
 699        tcpd_offset += txRing->lastWrite;
 700        tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
 701        typhoon_inc_tx_index(&txRing->lastWrite, 1);
 702
 703        tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
 704        tcpd->numDesc = 1;
 705        tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
 706        tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
 707        tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
 708        tcpd->bytesTx = cpu_to_le32(skb->len);
 709        tcpd->status = 0;
 710}
 711
 712static netdev_tx_t
 713typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
 714{
 715        struct typhoon *tp = netdev_priv(dev);
 716        struct transmit_ring *txRing;
 717        struct tx_desc *txd, *first_txd;
 718        dma_addr_t skb_dma;
 719        int numDesc;
 720
  721        /* we have two rings to choose from, but we only use txLo for now.
 722         * If we start using the Hi ring as well, we'll need to update
 723         * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
 724         * and TXHI_ENTRIES to match, as well as update the TSO code below
 725         * to get the right DMA address
 726         */
 727        txRing = &tp->txLoRing;
 728
 729        /* We need one descriptor for each fragment of the sk_buff, plus the
 730         * one for the ->data area of it.
 731         *
 732         * The docs say a maximum of 16 fragment descriptors per TCP option
 733         * descriptor, then make a new packet descriptor and option descriptor
 734         * for the next 16 fragments. The engineers say just an option
 735         * descriptor is needed. I've tested up to 26 fragments with a single
 736         * packet descriptor/option descriptor combo, so I use that for now.
 737         *
 738         * If problems develop with TSO, check this first.
 739         */
 740        numDesc = skb_shinfo(skb)->nr_frags + 1;
 741        if (skb_is_gso(skb))
 742                numDesc++;
 743
 744        /* When checking for free space in the ring, we need to also
 745         * account for the initial Tx descriptor, and we always must leave
 746         * at least one descriptor unused in the ring so that it doesn't
 747         * wrap and look empty.
 748         *
 749         * The only time we should loop here is when we hit the race
 750         * between marking the queue awake and updating the cleared index.
 751         * Just loop and it will appear. This comes from the acenic driver.
 752         */
 753        while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
 754                smp_rmb();
 755
 756        first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
 757        typhoon_inc_tx_index(&txRing->lastWrite, 1);
 758
 759        first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
 760        first_txd->numDesc = 0;
 761        first_txd->len = 0;
 762        first_txd->tx_addr = (u64)((unsigned long) skb);
 763        first_txd->processFlags = 0;
 764
 765        if(skb->ip_summed == CHECKSUM_PARTIAL) {
 766                /* The 3XP will figure out if this is UDP/TCP */
 767                first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
 768                first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
 769                first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
 770        }
 771
 772        if(vlan_tx_tag_present(skb)) {
 773                first_txd->processFlags |=
 774                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
 775                first_txd->processFlags |=
 776                    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
 777                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);
 778        }
 779
 780        if (skb_is_gso(skb)) {
 781                first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
 782                first_txd->numDesc++;
 783
 784                typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
 785        }
 786
 787        txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
 788        typhoon_inc_tx_index(&txRing->lastWrite, 1);
 789
  790        /* No need to worry about padding the packet -- the firmware pads
 791         * it with zeros to ETH_ZLEN for us.
 792         */
 793        if(skb_shinfo(skb)->nr_frags == 0) {
 794                skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
 795                                       PCI_DMA_TODEVICE);
 796                txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
 797                txd->len = cpu_to_le16(skb->len);
 798                txd->frag.addr = cpu_to_le32(skb_dma);
 799                txd->frag.addrHi = 0;
 800                first_txd->numDesc++;
 801        } else {
 802                int i, len;
 803
 804                len = skb_headlen(skb);
 805                skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
 806                                         PCI_DMA_TODEVICE);
 807                txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
 808                txd->len = cpu_to_le16(len);
 809                txd->frag.addr = cpu_to_le32(skb_dma);
 810                txd->frag.addrHi = 0;
 811                first_txd->numDesc++;
 812
 813                for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 814                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 815                        void *frag_addr;
 816
 817                        txd = (struct tx_desc *) (txRing->ringBase +
 818                                                txRing->lastWrite);
 819                        typhoon_inc_tx_index(&txRing->lastWrite, 1);
 820
 821                        len = frag->size;
 822                        frag_addr = (void *) page_address(frag->page) +
 823                                                frag->page_offset;
 824                        skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
 825                                         PCI_DMA_TODEVICE);
 826                        txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
 827                        txd->len = cpu_to_le16(len);
 828                        txd->frag.addr = cpu_to_le32(skb_dma);
 829                        txd->frag.addrHi = 0;
 830                        first_txd->numDesc++;
 831                }
 832        }
 833
 834        /* Kick the 3XP
 835         */
 836        wmb();
 837        iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
 838
 839        /* If we don't have room to put the worst case packet on the
 840         * queue, then we must stop the queue. We need 2 extra
 841         * descriptors -- one to prevent ring wrap, and one for the
 842         * Tx header.
 843         */
 844        numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
 845
 846        if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
 847                netif_stop_queue(dev);
 848
  849                /* A Tx complete IRQ could have gotten in between, making
 850                 * the ring free again. Only need to recheck here, since
 851                 * Tx is serialized.
 852                 */
 853                if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
 854                        netif_wake_queue(dev);
 855        }
 856
 857        return NETDEV_TX_OK;
 858}
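
/* For a TSO skb with two page fragments, the descriptors written above
 * end up on the ring in this order: the initial tx_desc (with numDesc
 * counting everything that follows it, here 4), the tcpopt_desc from
 * typhoon_tso_fill(), a TYPHOON_FRAG_DESC for the linear skb->data area,
 * and one TYPHOON_FRAG_DESC per page fragment. Only the final iowrite32()
 * of lastWrite makes the whole batch visible to the 3XP.
 */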
 859
 860static void
 861typhoon_set_rx_mode(struct net_device *dev)
 862{
 863        struct typhoon *tp = netdev_priv(dev);
 864        struct cmd_desc xp_cmd;
 865        u32 mc_filter[2];
 866        __le16 filter;
 867
 868        filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
 869        if(dev->flags & IFF_PROMISC) {
 870                filter |= TYPHOON_RX_FILTER_PROMISCOUS;
 871        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 872                  (dev->flags & IFF_ALLMULTI)) {
 873                /* Too many to match, or accept all multicasts. */
 874                filter |= TYPHOON_RX_FILTER_ALL_MCAST;
 875        } else if (!netdev_mc_empty(dev)) {
 876                struct netdev_hw_addr *ha;
 877
 878                memset(mc_filter, 0, sizeof(mc_filter));
 879                netdev_for_each_mc_addr(ha, dev) {
 880                        int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
 881                        mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
 882                }
 883
 884                INIT_COMMAND_NO_RESPONSE(&xp_cmd,
 885                                         TYPHOON_CMD_SET_MULTICAST_HASH);
 886                xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
 887                xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
 888                xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
 889                typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 890
 891                filter |= TYPHOON_RX_FILTER_MCAST_HASH;
 892        }
 893
 894        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
 895        xp_cmd.parm1 = filter;
 896        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 897}
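
/* The hash above keys on the low six bits of the Ethernet CRC. For
 * example, if ether_crc(ETH_ALEN, ha->addr) & 0x3f is 41, then bit >> 5
 * selects mc_filter[1] and 1 << (41 & 0x1f) sets bit 9 of that word;
 * parm2 and parm3 carry the two 32-bit halves of the filter to the 3XP.
 */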
 898
 899static int
 900typhoon_do_get_stats(struct typhoon *tp)
 901{
 902        struct net_device_stats *stats = &tp->stats;
 903        struct net_device_stats *saved = &tp->stats_saved;
 904        struct cmd_desc xp_cmd;
 905        struct resp_desc xp_resp[7];
 906        struct stats_resp *s = (struct stats_resp *) xp_resp;
 907        int err;
 908
 909        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
 910        err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
 911        if(err < 0)
 912                return err;
 913
  914        /* 3Com's Linux driver uses txMultipleCollisions as its
 915         * collisions value, but there is some other collision info as well...
 916         *
 917         * The extra status reported would be a good candidate for
 918         * ethtool_ops->get_{strings,stats}()
 919         */
 920        stats->tx_packets = le32_to_cpu(s->txPackets) +
 921                        saved->tx_packets;
 922        stats->tx_bytes = le64_to_cpu(s->txBytes) +
 923                        saved->tx_bytes;
 924        stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
 925                        saved->tx_errors;
 926        stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
 927                        saved->tx_carrier_errors;
 928        stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
 929                        saved->collisions;
 930        stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
 931                        saved->rx_packets;
 932        stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
 933                        saved->rx_bytes;
 934        stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
 935                        saved->rx_fifo_errors;
 936        stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
 937                        le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
 938                        saved->rx_errors;
 939        stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
 940                        saved->rx_crc_errors;
 941        stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
 942                        saved->rx_length_errors;
 943        tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
 944                        SPEED_100 : SPEED_10;
 945        tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
 946                        DUPLEX_FULL : DUPLEX_HALF;
 947
 948        return 0;
 949}
 950
 951static struct net_device_stats *
 952typhoon_get_stats(struct net_device *dev)
 953{
 954        struct typhoon *tp = netdev_priv(dev);
 955        struct net_device_stats *stats = &tp->stats;
 956        struct net_device_stats *saved = &tp->stats_saved;
 957
 958        smp_rmb();
 959        if(tp->card_state == Sleeping)
 960                return saved;
 961
 962        if(typhoon_do_get_stats(tp) < 0) {
 963                netdev_err(dev, "error getting stats\n");
 964                return saved;
 965        }
 966
 967        return stats;
 968}
 969
 970static int
 971typhoon_set_mac_address(struct net_device *dev, void *addr)
 972{
 973        struct sockaddr *saddr = (struct sockaddr *) addr;
 974
 975        if(netif_running(dev))
 976                return -EBUSY;
 977
 978        memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
 979        return 0;
 980}
 981
 982static void
 983typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 984{
 985        struct typhoon *tp = netdev_priv(dev);
 986        struct pci_dev *pci_dev = tp->pdev;
 987        struct cmd_desc xp_cmd;
 988        struct resp_desc xp_resp[3];
 989
 990        smp_rmb();
 991        if(tp->card_state == Sleeping) {
 992                strcpy(info->fw_version, "Sleep image");
 993        } else {
 994                INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
 995                if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
 996                        strcpy(info->fw_version, "Unknown runtime");
 997                } else {
 998                        u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
 999                        snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1000                                 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1001                                 sleep_ver & 0xfff);
1002                }
1003        }
1004
1005        strcpy(info->driver, KBUILD_MODNAME);
1006        strcpy(info->bus_info, pci_name(pci_dev));
1007}
1008
1009static int
1010typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1011{
1012        struct typhoon *tp = netdev_priv(dev);
1013
1014        cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1015                                SUPPORTED_Autoneg;
1016
1017        switch (tp->xcvr_select) {
1018        case TYPHOON_XCVR_10HALF:
1019                cmd->advertising = ADVERTISED_10baseT_Half;
1020                break;
1021        case TYPHOON_XCVR_10FULL:
1022                cmd->advertising = ADVERTISED_10baseT_Full;
1023                break;
1024        case TYPHOON_XCVR_100HALF:
1025                cmd->advertising = ADVERTISED_100baseT_Half;
1026                break;
1027        case TYPHOON_XCVR_100FULL:
1028                cmd->advertising = ADVERTISED_100baseT_Full;
1029                break;
1030        case TYPHOON_XCVR_AUTONEG:
1031                cmd->advertising = ADVERTISED_10baseT_Half |
1032                                            ADVERTISED_10baseT_Full |
1033                                            ADVERTISED_100baseT_Half |
1034                                            ADVERTISED_100baseT_Full |
1035                                            ADVERTISED_Autoneg;
1036                break;
1037        }
1038
1039        if(tp->capabilities & TYPHOON_FIBER) {
1040                cmd->supported |= SUPPORTED_FIBRE;
1041                cmd->advertising |= ADVERTISED_FIBRE;
1042                cmd->port = PORT_FIBRE;
1043        } else {
1044                cmd->supported |= SUPPORTED_10baseT_Half |
1045                                        SUPPORTED_10baseT_Full |
1046                                        SUPPORTED_TP;
1047                cmd->advertising |= ADVERTISED_TP;
1048                cmd->port = PORT_TP;
1049        }
1050
1051        /* need to get stats to make these link speed/duplex valid */
1052        typhoon_do_get_stats(tp);
1053        ethtool_cmd_speed_set(cmd, tp->speed);
1054        cmd->duplex = tp->duplex;
1055        cmd->phy_address = 0;
1056        cmd->transceiver = XCVR_INTERNAL;
1057        if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1058                cmd->autoneg = AUTONEG_ENABLE;
1059        else
1060                cmd->autoneg = AUTONEG_DISABLE;
1061        cmd->maxtxpkt = 1;
1062        cmd->maxrxpkt = 1;
1063
1064        return 0;
1065}
1066
1067static int
1068typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1069{
1070        struct typhoon *tp = netdev_priv(dev);
1071        u32 speed = ethtool_cmd_speed(cmd);
1072        struct cmd_desc xp_cmd;
1073        __le16 xcvr;
1074        int err;
1075
1076        err = -EINVAL;
1077        if (cmd->autoneg == AUTONEG_ENABLE) {
1078                xcvr = TYPHOON_XCVR_AUTONEG;
1079        } else {
1080                if (cmd->duplex == DUPLEX_HALF) {
1081                        if (speed == SPEED_10)
1082                                xcvr = TYPHOON_XCVR_10HALF;
1083                        else if (speed == SPEED_100)
1084                                xcvr = TYPHOON_XCVR_100HALF;
1085                        else
1086                                goto out;
1087                } else if (cmd->duplex == DUPLEX_FULL) {
1088                        if (speed == SPEED_10)
1089                                xcvr = TYPHOON_XCVR_10FULL;
1090                        else if (speed == SPEED_100)
1091                                xcvr = TYPHOON_XCVR_100FULL;
1092                        else
1093                                goto out;
1094                } else
1095                        goto out;
1096        }
1097
1098        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1099        xp_cmd.parm1 = xcvr;
1100        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1101        if(err < 0)
1102                goto out;
1103
1104        tp->xcvr_select = xcvr;
1105        if(cmd->autoneg == AUTONEG_ENABLE) {
1106                tp->speed = 0xff;       /* invalid */
1107                tp->duplex = 0xff;      /* invalid */
1108        } else {
1109                tp->speed = speed;
1110                tp->duplex = cmd->duplex;
1111        }
1112
1113out:
1114        return err;
1115}
1116
1117static void
1118typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1119{
1120        struct typhoon *tp = netdev_priv(dev);
1121
1122        wol->supported = WAKE_PHY | WAKE_MAGIC;
1123        wol->wolopts = 0;
1124        if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1125                wol->wolopts |= WAKE_PHY;
1126        if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1127                wol->wolopts |= WAKE_MAGIC;
1128        memset(&wol->sopass, 0, sizeof(wol->sopass));
1129}
1130
1131static int
1132typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1133{
1134        struct typhoon *tp = netdev_priv(dev);
1135
1136        if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1137                return -EINVAL;
1138
1139        tp->wol_events = 0;
1140        if(wol->wolopts & WAKE_PHY)
1141                tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1142        if(wol->wolopts & WAKE_MAGIC)
1143                tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1144
1145        return 0;
1146}
1147
1148static void
1149typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1150{
1151        ering->rx_max_pending = RXENT_ENTRIES;
1152        ering->rx_mini_max_pending = 0;
1153        ering->rx_jumbo_max_pending = 0;
1154        ering->tx_max_pending = TXLO_ENTRIES - 1;
1155
1156        ering->rx_pending = RXENT_ENTRIES;
1157        ering->rx_mini_pending = 0;
1158        ering->rx_jumbo_pending = 0;
1159        ering->tx_pending = TXLO_ENTRIES - 1;
1160}
1161
1162static const struct ethtool_ops typhoon_ethtool_ops = {
1163        .get_settings           = typhoon_get_settings,
1164        .set_settings           = typhoon_set_settings,
1165        .get_drvinfo            = typhoon_get_drvinfo,
1166        .get_wol                = typhoon_get_wol,
1167        .set_wol                = typhoon_set_wol,
1168        .get_link               = ethtool_op_get_link,
1169        .get_ringparam          = typhoon_get_ringparam,
1170};
1171
1172static int
1173typhoon_wait_interrupt(void __iomem *ioaddr)
1174{
1175        int i, err = 0;
1176
1177        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1178                if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1179                   TYPHOON_INTR_BOOTCMD)
1180                        goto out;
1181                udelay(TYPHOON_UDELAY);
1182        }
1183
1184        err = -ETIMEDOUT;
1185
1186out:
1187        iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1188        return err;
1189}
1190
1191#define shared_offset(x)        offsetof(struct typhoon_shared, x)
1192
1193static void
1194typhoon_init_interface(struct typhoon *tp)
1195{
1196        struct typhoon_interface *iface = &tp->shared->iface;
1197        dma_addr_t shared_dma;
1198
1199        memset(tp->shared, 0, sizeof(struct typhoon_shared));
1200
1201        /* The *Hi members of iface are all init'd to zero by the memset().
1202         */
1203        shared_dma = tp->shared_dma + shared_offset(indexes);
1204        iface->ringIndex = cpu_to_le32(shared_dma);
1205
1206        shared_dma = tp->shared_dma + shared_offset(txLo);
1207        iface->txLoAddr = cpu_to_le32(shared_dma);
1208        iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1209
1210        shared_dma = tp->shared_dma + shared_offset(txHi);
1211        iface->txHiAddr = cpu_to_le32(shared_dma);
1212        iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1213
1214        shared_dma = tp->shared_dma + shared_offset(rxBuff);
1215        iface->rxBuffAddr = cpu_to_le32(shared_dma);
1216        iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1217                                        sizeof(struct rx_free));
1218
1219        shared_dma = tp->shared_dma + shared_offset(rxLo);
1220        iface->rxLoAddr = cpu_to_le32(shared_dma);
1221        iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1222
1223        shared_dma = tp->shared_dma + shared_offset(rxHi);
1224        iface->rxHiAddr = cpu_to_le32(shared_dma);
1225        iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1226
1227        shared_dma = tp->shared_dma + shared_offset(cmd);
1228        iface->cmdAddr = cpu_to_le32(shared_dma);
1229        iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1230
1231        shared_dma = tp->shared_dma + shared_offset(resp);
1232        iface->respAddr = cpu_to_le32(shared_dma);
1233        iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1234
1235        shared_dma = tp->shared_dma + shared_offset(zeroWord);
1236        iface->zeroAddr = cpu_to_le32(shared_dma);
1237
1238        tp->indexes = &tp->shared->indexes;
1239        tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1240        tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1241        tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1242        tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1243        tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1244        tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1245        tp->respRing.ringBase = (u8 *) tp->shared->resp;
1246
1247        tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1248        tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1249
1250        tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
1251        tp->card_state = Sleeping;
1252
1253        tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1254        tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1255        tp->offload |= TYPHOON_OFFLOAD_VLAN;
1256
1257        spin_lock_init(&tp->command_lock);
1258
1259        /* Force the writes to the shared memory area out before continuing. */
1260        wmb();
1261}
1262
1263static void
1264typhoon_init_rings(struct typhoon *tp)
1265{
1266        memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1267
1268        tp->txLoRing.lastWrite = 0;
1269        tp->txHiRing.lastWrite = 0;
1270        tp->rxLoRing.lastWrite = 0;
1271        tp->rxHiRing.lastWrite = 0;
1272        tp->rxBuffRing.lastWrite = 0;
1273        tp->cmdRing.lastWrite = 0;
1274        tp->respRing.lastWrite = 0;
1275
1276        tp->txLoRing.lastRead = 0;
1277        tp->txHiRing.lastRead = 0;
1278}
1279
1280static const struct firmware *typhoon_fw;
1281
1282static int
1283typhoon_request_firmware(struct typhoon *tp)
1284{
1285        const struct typhoon_file_header *fHdr;
1286        const struct typhoon_section_header *sHdr;
1287        const u8 *image_data;
1288        u32 numSections;
1289        u32 section_len;
1290        u32 remaining;
1291        int err;
1292
1293        if (typhoon_fw)
1294                return 0;
1295
1296        err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1297        if (err) {
1298                netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1299                           FIRMWARE_NAME);
1300                return err;
1301        }
1302
1303        image_data = (u8 *) typhoon_fw->data;
1304        remaining = typhoon_fw->size;
1305        if (remaining < sizeof(struct typhoon_file_header))
1306                goto invalid_fw;
1307
1308        fHdr = (struct typhoon_file_header *) image_data;
1309        if (memcmp(fHdr->tag, "TYPHOON", 8))
1310                goto invalid_fw;
1311
1312        numSections = le32_to_cpu(fHdr->numSections);
1313        image_data += sizeof(struct typhoon_file_header);
1314        remaining -= sizeof(struct typhoon_file_header);
1315
1316        while (numSections--) {
1317                if (remaining < sizeof(struct typhoon_section_header))
1318                        goto invalid_fw;
1319
1320                sHdr = (struct typhoon_section_header *) image_data;
 1321                image_data += sizeof(struct typhoon_section_header);
 1322                section_len = le32_to_cpu(sHdr->len);
                     remaining -= sizeof(struct typhoon_section_header);
1323
1324                if (remaining < section_len)
1325                        goto invalid_fw;
1326
1327                image_data += section_len;
1328                remaining -= section_len;
1329        }
1330
1331        return 0;
1332
1333invalid_fw:
1334        netdev_err(tp->dev, "Invalid firmware image\n");
1335        release_firmware(typhoon_fw);
1336        typhoon_fw = NULL;
1337        return -EINVAL;
1338}
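
/* The sanity walk above matches the expected layout of 3com/typhoon.bin:
 * a typhoon_file_header (the 8-byte "TYPHOON" tag, numSections, the boot
 * startAddr, and an HMAC digest) followed by numSections blocks, each a
 * typhoon_section_header giving len and startAddr and then len bytes of
 * image data. typhoon_download_firmware() below walks the same layout
 * while feeding each section to the 3XP.
 */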
1339
1340static int
1341typhoon_download_firmware(struct typhoon *tp)
1342{
1343        void __iomem *ioaddr = tp->ioaddr;
1344        struct pci_dev *pdev = tp->pdev;
1345        const struct typhoon_file_header *fHdr;
1346        const struct typhoon_section_header *sHdr;
1347        const u8 *image_data;
1348        void *dpage;
1349        dma_addr_t dpage_dma;
1350        __sum16 csum;
1351        u32 irqEnabled;
1352        u32 irqMasked;
1353        u32 numSections;
1354        u32 section_len;
1355        u32 len;
1356        u32 load_addr;
1357        u32 hmac;
1358        int i;
1359        int err;
1360
1361        image_data = (u8 *) typhoon_fw->data;
1362        fHdr = (struct typhoon_file_header *) image_data;
1363
1364        /* Cannot just map the firmware image using pci_map_single() as
1365         * the firmware is vmalloc()'d and may not be physically contiguous,
1366         * so we allocate some consistent memory to copy the sections into.
1367         */
1368        err = -ENOMEM;
1369        dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1370        if(!dpage) {
1371                netdev_err(tp->dev, "no DMA mem for firmware\n");
1372                goto err_out;
1373        }
1374
1375        irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
1376        iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
1377               ioaddr + TYPHOON_REG_INTR_ENABLE);
1378        irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
1379        iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
1380               ioaddr + TYPHOON_REG_INTR_MASK);
1381
1382        err = -ETIMEDOUT;
1383        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1384                netdev_err(tp->dev, "card ready timeout\n");
1385                goto err_out_irq;
1386        }
1387
1388        numSections = le32_to_cpu(fHdr->numSections);
1389        load_addr = le32_to_cpu(fHdr->startAddr);
1390
1391        iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1392        iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1393        hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1394        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1395        hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1396        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1397        hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1398        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1399        hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1400        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1401        hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1402        iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1403        typhoon_post_pci_writes(ioaddr);
1404        iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1405
1406        image_data += sizeof(struct typhoon_file_header);
1407
1408        /* The ioread32() in typhoon_wait_interrupt() will force the
1409         * last write to the command register to post, so
1410         * we don't need a typhoon_post_pci_writes() after it.
1411         */
1412        for(i = 0; i < numSections; i++) {
1413                sHdr = (struct typhoon_section_header *) image_data;
1414                image_data += sizeof(struct typhoon_section_header);
1415                load_addr = le32_to_cpu(sHdr->startAddr);
1416                section_len = le32_to_cpu(sHdr->len);
1417
1418                while(section_len) {
1419                        len = min_t(u32, section_len, PAGE_SIZE);
1420
1421                        if(typhoon_wait_interrupt(ioaddr) < 0 ||
1422                           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1423                           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1424                                netdev_err(tp->dev, "segment ready timeout\n");
1425                                goto err_out_irq;
1426                        }
1427
1428                        /* Do a pseudo IPv4 checksum on the data -- first
1429                         * need to convert each u16 to cpu order before
1430                         * summing. Fortunately, due to the properties of
1431                         * the checksum, we can do this once, at the end.
1432                         */
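                            /* The 16-bit ones'-complement sum is byte-order
                             * independent (RFC 1071): summing in host order
                             * and byte-swapping the folded result once is the
                             * same as summing the data as little-endian
                             * words, which is what the le16_to_cpu() below
                             * relies on.
                             */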
1433                        csum = csum_fold(csum_partial_copy_nocheck(image_data,
1434                                                                   dpage, len,
1435                                                                   0));
1436
1437                        iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1438                        iowrite32(le16_to_cpu((__force __le16)csum),
1439                                        ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1440                        iowrite32(load_addr,
1441                                        ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1442                        iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1443                        iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1444                        typhoon_post_pci_writes(ioaddr);
1445                        iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1446                                        ioaddr + TYPHOON_REG_COMMAND);
1447
1448                        image_data += len;
1449                        load_addr += len;
1450                        section_len -= len;
1451                }
1452        }
1453
1454        if(typhoon_wait_interrupt(ioaddr) < 0 ||
1455           ioread32(ioaddr + TYPHOON_REG_STATUS) !=
1456           TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1457                netdev_err(tp->dev, "final segment ready timeout\n");
1458                goto err_out_irq;
1459        }
1460
1461        iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1462
1463        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1464                netdev_err(tp->dev, "boot ready timeout, status 0x%x\n",
1465                           ioread32(ioaddr + TYPHOON_REG_STATUS));
1466                goto err_out_irq;
1467        }
1468
1469        err = 0;
1470
1471err_out_irq:
1472        iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1473        iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1474
1475        pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1476
1477err_out:
1478        return err;
1479}
1480
1481static int
1482typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1483{
1484        void __iomem *ioaddr = tp->ioaddr;
1485
1486        if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1487                netdev_err(tp->dev, "boot ready timeout\n");
1488                goto out_timeout;
1489        }
1490
1491        iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1492        iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1493        typhoon_post_pci_writes(ioaddr);
1494        iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
1495                                ioaddr + TYPHOON_REG_COMMAND);
1496
1497        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1498                netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
1499                           ioread32(ioaddr + TYPHOON_REG_STATUS));
1500                goto out_timeout;
1501        }
1502
1503        /* Clear the Transmit and Command ready registers
1504         */
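            /* The host later writes ring offsets into these registers to
             * tell the image about new commands and Tx descriptors;
             * presumably zeroing them here ensures the freshly booted
             * runtime starts out seeing empty rings.
             */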
1505        iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1506        iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
1507        iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1508        typhoon_post_pci_writes(ioaddr);
1509        iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1510
1511        return 0;
1512
1513out_timeout:
1514        return -ETIMEDOUT;
1515}
1516
1517static u32
1518typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1519                        volatile __le32 * index)
1520{
1521        u32 lastRead = txRing->lastRead;
1522        struct tx_desc *tx;
1523        dma_addr_t skb_dma;
1524        int dma_len;
1525        int type;
1526
1527        while(lastRead != le32_to_cpu(*index)) {
1528                tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1529                type = tx->flags & TYPHOON_TYPE_MASK;
1530
1531                if(type == TYPHOON_TX_DESC) {
1532                        /* This tx_desc describes a packet.
1533                         */
1534                        unsigned long ptr = tx->tx_addr;
1535                        struct sk_buff *skb = (struct sk_buff *) ptr;
1536                        dev_kfree_skb_irq(skb);
1537                } else if(type == TYPHOON_FRAG_DESC) {
1538                        /* This tx_desc describes a memory mapping. Free it.
1539                         */
1540                        skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1541                        dma_len = le16_to_cpu(tx->len);
1542                        pci_unmap_single(tp->pdev, skb_dma, dma_len,
1543                                       PCI_DMA_TODEVICE);
1544                }
1545
1546                tx->flags = 0;
1547                typhoon_inc_tx_index(&lastRead, 1);
1548        }
1549
1550        return lastRead;
1551}
1552
1553static void
1554typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1555                        volatile __le32 * index)
1556{
1557        u32 lastRead;
1558        int numDesc = MAX_SKB_FRAGS + 1;
1559
1560        /* This will need changing if we start to use the Hi Tx ring. */
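            /* Only wake the queue once a worst-case packet (one tx_desc plus
             * up to MAX_SKB_FRAGS fragment descriptors) would fit, with two
             * entries of slack -- presumably to cover an extra TSO/option
             * descriptor and avoid bouncing the queue right at the boundary.
             */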
1561        lastRead = typhoon_clean_tx(tp, txRing, index);
1562        if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1563                                lastRead, TXLO_ENTRIES) > (numDesc + 2))
1564                netif_wake_queue(tp->dev);
1565
1566        txRing->lastRead = lastRead;
1567        smp_wmb();
1568}
1569
1570static void
1571typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1572{
1573        struct typhoon_indexes *indexes = tp->indexes;
1574        struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1575        struct basic_ring *ring = &tp->rxBuffRing;
1576        struct rx_free *r;
1577
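            /* The free-buffer ring counts as full when advancing lastWrite
             * by one entry would land on the index the NIC has cleared --
             * one slot always stays open so that full and empty states are
             * distinguishable.
             */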
1578        if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1579                                le32_to_cpu(indexes->rxBuffCleared)) {
1580                /* no room in ring, just drop the skb
1581                 */
1582                dev_kfree_skb_any(rxb->skb);
1583                rxb->skb = NULL;
1584                return;
1585        }
1586
1587        r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1588        typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1589        r->virtAddr = idx;
1590        r->physAddr = cpu_to_le32(rxb->dma_addr);
1591
1592        /* Tell the card about it */
1593        wmb();
1594        indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1595}
1596
1597static int
1598typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1599{
1600        struct typhoon_indexes *indexes = tp->indexes;
1601        struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1602        struct basic_ring *ring = &tp->rxBuffRing;
1603        struct rx_free *r;
1604        struct sk_buff *skb;
1605        dma_addr_t dma_addr;
1606
1607        rxb->skb = NULL;
1608
1609        if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1610                                le32_to_cpu(indexes->rxBuffCleared))
1611                return -ENOMEM;
1612
1613        skb = dev_alloc_skb(PKT_BUF_SZ);
1614        if(!skb)
1615                return -ENOMEM;
1616
1617#if 0
1618        /* Please, 3com, fix the firmware to allow DMA to an unaligned
1619         * address! Pretty please?
1620         */
1621        skb_reserve(skb, 2);
1622#endif
1623
1624        skb->dev = tp->dev;
1625        dma_addr = pci_map_single(tp->pdev, skb->data,
1626                                  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1627
1628        /* Since no card does 64 bit DAC, the high bits will never
1629         * change from zero.
1630         */
1631        r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1632        typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1633        r->virtAddr = idx;
1634        r->physAddr = cpu_to_le32(dma_addr);
1635        rxb->skb = skb;
1636        rxb->dma_addr = dma_addr;
1637
1638        /* Tell the card about it */
1639        wmb();
1640        indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1641        return 0;
1642}
1643
1644static int
1645typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
1646           volatile __le32 * cleared, int budget)
1647{
1648        struct rx_desc *rx;
1649        struct sk_buff *skb, *new_skb;
1650        struct rxbuff_ent *rxb;
1651        dma_addr_t dma_addr;
1652        u32 local_ready;
1653        u32 rxaddr;
1654        int pkt_len;
1655        u32 idx;
1656        __le32 csum_bits;
1657        int received;
1658
1659        received = 0;
1660        local_ready = le32_to_cpu(*ready);
1661        rxaddr = le32_to_cpu(*cleared);
1662        while(rxaddr != local_ready && budget > 0) {
1663                rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1664                idx = rx->addr;
1665                rxb = &tp->rxbuffers[idx];
1666                skb = rxb->skb;
1667                dma_addr = rxb->dma_addr;
1668
1669                typhoon_inc_rx_index(&rxaddr, 1);
1670
1671                if(rx->flags & TYPHOON_RX_ERROR) {
1672                        typhoon_recycle_rx_skb(tp, idx);
1673                        continue;
1674                }
1675
1676                pkt_len = le16_to_cpu(rx->frameLen);
1677
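                    /* Copybreak: frames shorter than rx_copybreak are copied
                     * into a small fresh skb (reserving 2 bytes so the IP
                     * header is 4-byte aligned) and the original DMA buffer
                     * is recycled; larger frames are passed up as-is and a
                     * replacement buffer is allocated instead.
                     */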
1678                if(pkt_len < rx_copybreak &&
1679                   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1680                        skb_reserve(new_skb, 2);
1681                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1682                                                    PKT_BUF_SZ,
1683                                                    PCI_DMA_FROMDEVICE);
1684                        skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
1685                        pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1686                                                       PKT_BUF_SZ,
1687                                                       PCI_DMA_FROMDEVICE);
1688                        skb_put(new_skb, pkt_len);
1689                        typhoon_recycle_rx_skb(tp, idx);
1690                } else {
1691                        new_skb = skb;
1692                        skb_put(new_skb, pkt_len);
1693                        pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1694                                       PCI_DMA_FROMDEVICE);
1695                        typhoon_alloc_rx_skb(tp, idx);
1696                }
1697                new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1698                csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1699                        TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1700                if(csum_bits ==
1701                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
1702                   csum_bits ==
1703                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1704                        new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1705                } else
1706                        skb_checksum_none_assert(new_skb);
1707
1708                if (rx->rxStatus & TYPHOON_RX_VLAN)
1709                        __vlan_hwaccel_put_tag(new_skb,
1710                                               ntohl(rx->vlanTag) & 0xffff);
1711                netif_receive_skb(new_skb);
1712
1713                received++;
1714                budget--;
1715        }
1716        *cleared = cpu_to_le32(rxaddr);
1717
1718        return received;
1719}
1720
1721static void
1722typhoon_fill_free_ring(struct typhoon *tp)
1723{
1724        u32 i;
1725
1726        for(i = 0; i < RXENT_ENTRIES; i++) {
1727                struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1728                if(rxb->skb)
1729                        continue;
1730                if(typhoon_alloc_rx_skb(tp, i) < 0)
1731                        break;
1732        }
1733}
1734
1735static int
1736typhoon_poll(struct napi_struct *napi, int budget)
1737{
1738        struct typhoon *tp = container_of(napi, struct typhoon, napi);
1739        struct typhoon_indexes *indexes = tp->indexes;
1740        int work_done;
1741
1742        rmb();
1743        if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1744                typhoon_process_response(tp, 0, NULL);
1745
1746        if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1747                typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
1748
1749        work_done = 0;
1750
1751        if(indexes->rxHiCleared != indexes->rxHiReady) {
1752                work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1753                                        &indexes->rxHiCleared, budget);
1754        }
1755
1756        if(indexes->rxLoCleared != indexes->rxLoReady) {
1757                work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1758                                        &indexes->rxLoCleared, budget - work_done);
1759        }
1760
1761        if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1762                /* rxBuff ring is empty, try to fill it. */
1763                typhoon_fill_free_ring(tp);
1764        }
1765
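            /* Per the NAPI contract, only complete the poll and unmask the
             * chip (the interrupt handler masked everything with
             * TYPHOON_INTR_ALL) when we did less work than the budget;
             * otherwise we stay scheduled and will be polled again.
             */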
1766        if (work_done < budget) {
1767                napi_complete(napi);
1768                iowrite32(TYPHOON_INTR_NONE,
1769                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
1770                typhoon_post_pci_writes(tp->ioaddr);
1771        }
1772
1773        return work_done;
1774}
1775
1776static irqreturn_t
1777typhoon_interrupt(int irq, void *dev_instance)
1778{
1779        struct net_device *dev = dev_instance;
1780        struct typhoon *tp = netdev_priv(dev);
1781        void __iomem *ioaddr = tp->ioaddr;
1782        u32 intr_status;
1783
1784        intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1785        if(!(intr_status & TYPHOON_INTR_HOST_INT))
1786                return IRQ_NONE;
1787
1788        iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1789
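            /* Usual NAPI pattern: the interrupt was acknowledged above, now
             * mask further interrupts and defer the real work to
             * typhoon_poll(), which unmasks again once it runs out of work.
             */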
1790        if (napi_schedule_prep(&tp->napi)) {
1791                iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1792                typhoon_post_pci_writes(ioaddr);
1793                __napi_schedule(&tp->napi);
1794        } else {
1795                netdev_err(dev, "Error, poll already scheduled\n");
1796        }
1797        return IRQ_HANDLED;
1798}
1799
1800static void
1801typhoon_free_rx_rings(struct typhoon *tp)
1802{
1803        u32 i;
1804
1805        for(i = 0; i < RXENT_ENTRIES; i++) {
1806                struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1807                if(rxb->skb) {
1808                        pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1809                                       PCI_DMA_FROMDEVICE);
1810                        dev_kfree_skb(rxb->skb);
1811                        rxb->skb = NULL;
1812                }
1813        }
1814}
1815
1816static int
1817typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1818{
1819        struct pci_dev *pdev = tp->pdev;
1820        void __iomem *ioaddr = tp->ioaddr;
1821        struct cmd_desc xp_cmd;
1822        int err;
1823
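            /* Sleep sequence: arm the requested wake events, ask the image
             * to go to sleep, wait for it to report SLEEPING, then put the
             * PCI device into the requested low-power state with wake
             * enabled.
             */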
1824        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1825        xp_cmd.parm1 = events;
1826        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1827        if(err < 0) {
1828                netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1829                           err);
1830                return err;
1831        }
1832
1833        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1834        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1835        if(err < 0) {
1836                netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1837                return err;
1838        }
1839
1840        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1841                return -ETIMEDOUT;
1842
1843        /* Since we cannot monitor the status of the link while sleeping,
1844         * tell the world it went away.
1845         */
1846        netif_carrier_off(tp->dev);
1847
1848        pci_enable_wake(tp->pdev, state, 1);
1849        pci_disable_device(pdev);
1850        return pci_set_power_state(pdev, state);
1851}
1852
1853static int
1854typhoon_wakeup(struct typhoon *tp, int wait_type)
1855{
1856        struct pci_dev *pdev = tp->pdev;
1857        void __iomem *ioaddr = tp->ioaddr;
1858
1859        pci_set_power_state(pdev, PCI_D0);
1860        pci_restore_state(pdev);
1861
1862        /* Post 2.x.x versions of the Sleep Image require a reset before
1863         * we can download the Runtime Image. But let's not make users of
1864         * the old firmware pay for the reset.
1865         */
1866        iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1867        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1868                        (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1869                return typhoon_reset(ioaddr, wait_type);
1870
1871        return 0;
1872}
1873
1874static int
1875typhoon_start_runtime(struct typhoon *tp)
1876{
1877        struct net_device *dev = tp->dev;
1878        void __iomem *ioaddr = tp->ioaddr;
1879        struct cmd_desc xp_cmd;
1880        int err;
1881
1882        typhoon_init_rings(tp);
1883        typhoon_fill_free_ring(tp);
1884
1885        err = typhoon_download_firmware(tp);
1886        if(err < 0) {
1887                netdev_err(tp->dev, "cannot load runtime on 3XP\n");
1888                goto error_out;
1889        }
1890
1891        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1892                netdev_err(tp->dev, "cannot boot 3XP\n");
1893                err = -EIO;
1894                goto error_out;
1895        }
1896
1897        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1898        xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1899        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1900        if(err < 0)
1901                goto error_out;
1902
1903        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1904        xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1905        xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1906        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1907        if(err < 0)
1908                goto error_out;
1909
1910        /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1911         * us some more information on how to control it.
1912         */
1913        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1914        xp_cmd.parm1 = 0;
1915        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1916        if(err < 0)
1917                goto error_out;
1918
1919        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1920        xp_cmd.parm1 = tp->xcvr_select;
1921        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1922        if(err < 0)
1923                goto error_out;
1924
1925        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1926        xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1927        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1928        if(err < 0)
1929                goto error_out;
1930
1931        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1932        xp_cmd.parm2 = tp->offload;
1933        xp_cmd.parm3 = tp->offload;
1934        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1935        if(err < 0)
1936                goto error_out;
1937
1938        typhoon_set_rx_mode(dev);
1939
1940        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1941        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1942        if(err < 0)
1943                goto error_out;
1944
1945        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1946        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1947        if(err < 0)
1948                goto error_out;
1949
1950        tp->card_state = Running;
1951        smp_wmb();
1952
1953        iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1954        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1955        typhoon_post_pci_writes(ioaddr);
1956
1957        return 0;
1958
1959error_out:
1960        typhoon_reset(ioaddr, WaitNoSleep);
1961        typhoon_free_rx_rings(tp);
1962        typhoon_init_rings(tp);
1963        return err;
1964}
1965
1966static int
1967typhoon_stop_runtime(struct typhoon *tp, int wait_type)
1968{
1969        struct typhoon_indexes *indexes = tp->indexes;
1970        struct transmit_ring *txLo = &tp->txLoRing;
1971        void __iomem *ioaddr = tp->ioaddr;
1972        struct cmd_desc xp_cmd;
1973        int i;
1974
1975        /* Disable interrupts early, since we can't schedule a poll
1976         * when called with !netif_running(). This will be posted
1977         * when we force the posting of the command.
1978         */
1979        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
1980
1981        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
1982        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1983
1984        /* Wait 1/2 sec for any outstanding transmits to complete.
1985         * We'll clean up after the reset if this times out.
1986         */
1987        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1988                if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
1989                        break;
1990                udelay(TYPHOON_UDELAY);
1991        }
1992
1993        if(i == TYPHOON_WAIT_TIMEOUT)
1994                netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
1995
1996        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
1997        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1998
1999        /* save the statistics so when we bring the interface up again,
2000         * the values reported to userspace are correct.
2001         */
2002        tp->card_state = Sleeping;
2003        smp_wmb();
2004        typhoon_do_get_stats(tp);
2005        memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2006
2007        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2008        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2009
2010        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2011                netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
2012
2013        if(typhoon_reset(ioaddr, wait_type) < 0) {
2014                netdev_err(tp->dev, "unable to reset 3XP\n");
2015                return -ETIMEDOUT;
2016        }
2017
2018        /* cleanup any outstanding Tx packets */
2019        if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2020                indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2021                typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2022        }
2023
2024        return 0;
2025}
2026
2027static void
2028typhoon_tx_timeout(struct net_device *dev)
2029{
2030        struct typhoon *tp = netdev_priv(dev);
2031
2032        if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2033                netdev_warn(dev, "could not reset in tx timeout\n");
2034                goto truly_dead;
2035        }
2036
2037        /* If we ever start using the Hi ring, it will need cleaning too */
2038        typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2039        typhoon_free_rx_rings(tp);
2040
2041        if(typhoon_start_runtime(tp) < 0) {
2042                netdev_err(dev, "could not start runtime in tx timeout\n");
2043                goto truly_dead;
2044        }
2045
2046        netif_wake_queue(dev);
2047        return;
2048
2049truly_dead:
2050        /* Reset the hardware, and turn off carrier to avoid more timeouts */
2051        typhoon_reset(tp->ioaddr, NoWait);
2052        netif_carrier_off(dev);
2053}
2054
2055static int
2056typhoon_open(struct net_device *dev)
2057{
2058        struct typhoon *tp = netdev_priv(dev);
2059        int err;
2060
2061        err = typhoon_request_firmware(tp);
2062        if (err)
2063                goto out;
2064
2065        err = typhoon_wakeup(tp, WaitSleep);
2066        if(err < 0) {
2067                netdev_err(dev, "unable to wakeup device\n");
2068                goto out_sleep;
2069        }
2070
2071        err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2072                                dev->name, dev);
2073        if(err < 0)
2074                goto out_sleep;
2075
2076        napi_enable(&tp->napi);
2077
2078        err = typhoon_start_runtime(tp);
2079        if(err < 0) {
2080                napi_disable(&tp->napi);
2081                goto out_irq;
2082        }
2083
2084        netif_start_queue(dev);
2085        return 0;
2086
2087out_irq:
2088        free_irq(dev->irq, dev);
2089
2090out_sleep:
2091        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2092                netdev_err(dev, "unable to reboot into sleep image\n");
2093                typhoon_reset(tp->ioaddr, NoWait);
2094                goto out;
2095        }
2096
2097        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2098                netdev_err(dev, "unable to go back to sleep\n");
2099
2100out:
2101        return err;
2102}
2103
2104static int
2105typhoon_close(struct net_device *dev)
2106{
2107        struct typhoon *tp = netdev_priv(dev);
2108
2109        netif_stop_queue(dev);
2110        napi_disable(&tp->napi);
2111
2112        if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2113                netdev_err(dev, "unable to stop runtime\n");
2114
2115        /* Make sure there is no irq handler running on a different CPU. */
2116        free_irq(dev->irq, dev);
2117
2118        typhoon_free_rx_rings(tp);
2119        typhoon_init_rings(tp);
2120
2121        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2122                netdev_err(dev, "unable to boot sleep image\n");
2123
2124        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2125                netdev_err(dev, "unable to put card to sleep\n");
2126
2127        return 0;
2128}
2129
2130#ifdef CONFIG_PM
2131static int
2132typhoon_resume(struct pci_dev *pdev)
2133{
2134        struct net_device *dev = pci_get_drvdata(pdev);
2135        struct typhoon *tp = netdev_priv(dev);
2136
2137        /* If we're down, resume when the interface is brought back up.
2138         */
2139        if(!netif_running(dev))
2140                return 0;
2141
2142        if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2143                netdev_err(dev, "critical: could not wake up in resume\n");
2144                goto reset;
2145        }
2146
2147        if(typhoon_start_runtime(tp) < 0) {
2148                netdev_err(dev, "critical: could not start runtime in resume\n");
2149                goto reset;
2150        }
2151
2152        netif_device_attach(dev);
2153        return 0;
2154
2155reset:
2156        typhoon_reset(tp->ioaddr, NoWait);
2157        return -EBUSY;
2158}
2159
2160static int
2161typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
2162{
2163        struct net_device *dev = pci_get_drvdata(pdev);
2164        struct typhoon *tp = netdev_priv(dev);
2165        struct cmd_desc xp_cmd;
2166
2167        /* If we're down, we're already suspended.
2168         */
2169        if(!netif_running(dev))
2170                return 0;
2171
2172        /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
2173        if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
2174                netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
2175
2176        netif_device_detach(dev);
2177
2178        if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2179                netdev_err(dev, "unable to stop runtime\n");
2180                goto need_resume;
2181        }
2182
2183        typhoon_free_rx_rings(tp);
2184        typhoon_init_rings(tp);
2185
2186        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2187                netdev_err(dev, "unable to boot sleep image\n");
2188                goto need_resume;
2189        }
2190
2191        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2192        xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
2193        xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
2194        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2195                netdev_err(dev, "unable to set mac address in suspend\n");
2196                goto need_resume;
2197        }
2198
2199        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2200        xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2201        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2202                netdev_err(dev, "unable to set rx filter in suspend\n");
2203                goto need_resume;
2204        }
2205
2206        if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
2207                netdev_err(dev, "unable to put card to sleep\n");
2208                goto need_resume;
2209        }
2210
2211        return 0;
2212
2213need_resume:
2214        typhoon_resume(pdev);
2215        return -EBUSY;
2216}
2217#endif
2218
2219static int __devinit
2220typhoon_test_mmio(struct pci_dev *pdev)
2221{
2222        void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
2223        int mode = 0;
2224        u32 val;
2225
2226        if(!ioaddr)
2227                goto out;
2228
2229        if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
2230                                TYPHOON_STATUS_WAITING_FOR_HOST)
2231                goto out_unmap;
2232
2233        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2234        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2235        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2236
2237        /* Ok, see if we can change our interrupt status register by
2238         * sending ourselves an interrupt. If so, then MMIO works.
2239         * The 50usec delay is arbitrary -- it could probably be smaller.
2240         */
2241        val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2242        if((val & TYPHOON_INTR_SELF) == 0) {
2243                iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
2244                ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2245                udelay(50);
2246                val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2247                if(val & TYPHOON_INTR_SELF)
2248                        mode = 1;
2249        }
2250
2251        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
2252        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
2253        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2254        ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
2255
2256out_unmap:
2257        pci_iounmap(pdev, ioaddr);
2258
2259out:
2260        if(!mode)
2261                pr_info("%s: falling back to port IO\n", pci_name(pdev));
2262        return mode;
2263}
2264
2265static const struct net_device_ops typhoon_netdev_ops = {
2266        .ndo_open               = typhoon_open,
2267        .ndo_stop               = typhoon_close,
2268        .ndo_start_xmit         = typhoon_start_tx,
2269        .ndo_set_multicast_list = typhoon_set_rx_mode,
2270        .ndo_tx_timeout         = typhoon_tx_timeout,
2271        .ndo_get_stats          = typhoon_get_stats,
2272        .ndo_validate_addr      = eth_validate_addr,
2273        .ndo_set_mac_address    = typhoon_set_mac_address,
2274        .ndo_change_mtu         = eth_change_mtu,
2275};
2276
2277static int __devinit
2278typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2279{
2280        struct net_device *dev;
2281        struct typhoon *tp;
2282        int card_id = (int) ent->driver_data;
2283        void __iomem *ioaddr;
2284        void *shared;
2285        dma_addr_t shared_dma;
2286        struct cmd_desc xp_cmd;
2287        struct resp_desc xp_resp[3];
2288        int err = 0;
2289        const char *err_msg;
2290
2291        dev = alloc_etherdev(sizeof(*tp));
2292        if(dev == NULL) {
2293                err_msg = "unable to alloc new net device";
2294                err = -ENOMEM;
2295                goto error_out;
2296        }
2297        SET_NETDEV_DEV(dev, &pdev->dev);
2298
2299        err = pci_enable_device(pdev);
2300        if(err < 0) {
2301                err_msg = "unable to enable device";
2302                goto error_out_dev;
2303        }
2304
2305        err = pci_set_mwi(pdev);
2306        if(err < 0) {
2307                err_msg = "unable to set MWI";
2308                goto error_out_disable;
2309        }
2310
2311        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2312        if(err < 0) {
2313                err_msg = "No usable DMA configuration";
2314                goto error_out_mwi;
2315        }
2316
2317        /* sanity checks on IO and MMIO BARs
2318         */
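            /* BAR 0 is the port IO window and BAR 1 the MMIO window; each
             * must be at least the 128 bytes we map before we choose
             * between them via use_mmio below.
             */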
2319        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2320                err_msg = "region #0 not a PCI IO resource, aborting";
2321                err = -ENODEV;
2322                goto error_out_mwi;
2323        }
2324        if(pci_resource_len(pdev, 0) < 128) {
2325                err_msg = "Invalid PCI IO region size, aborting";
2326                err = -ENODEV;
2327                goto error_out_mwi;
2328        }
2329        if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2330                err_msg = "region #1 not a PCI MMIO resource, aborting";
2331                err = -ENODEV;
2332                goto error_out_mwi;
2333        }
2334        if(pci_resource_len(pdev, 1) < 128) {
2335                err_msg = "Invalid PCI MMIO region size, aborting";
2336                err = -ENODEV;
2337                goto error_out_mwi;
2338        }
2339
2340        err = pci_request_regions(pdev, KBUILD_MODNAME);
2341        if(err < 0) {
2342                err_msg = "could not request regions";
2343                goto error_out_mwi;
2344        }
2345
2346        /* map our registers
2347         */
2348        if(use_mmio != 0 && use_mmio != 1)
2349                use_mmio = typhoon_test_mmio(pdev);
2350
2351        ioaddr = pci_iomap(pdev, use_mmio, 128);
2352        if (!ioaddr) {
2353                err_msg = "cannot remap registers, aborting";
2354                err = -EIO;
2355                goto error_out_regions;
2356        }
2357
2358        /* allocate pci dma space for rx and tx descriptor rings
2359         */
2360        shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2361                                      &shared_dma);
2362        if(!shared) {
2363                err_msg = "could not allocate DMA memory";
2364                err = -ENOMEM;
2365                goto error_out_remap;
2366        }
2367
2368        dev->irq = pdev->irq;
2369        tp = netdev_priv(dev);
2370        tp->shared = (struct typhoon_shared *) shared;
2371        tp->shared_dma = shared_dma;
2372        tp->pdev = pdev;
2373        tp->tx_pdev = pdev;
2374        tp->ioaddr = ioaddr;
2375        tp->tx_ioaddr = ioaddr;
2376        tp->dev = dev;
2377
2378        /* Init sequence:
2379         * 1) Reset the adapter to clear any bad juju
2380         * 2) Reload the sleep image
2381         * 3) Boot the sleep image
2382         * 4) Get the hardware address.
2383         * 5) Put the card to sleep.
2384         */
2385        if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2386                err_msg = "could not reset 3XP";
2387                err = -EIO;
2388                goto error_out_dma;
2389        }
2390
2391        /* Now that we've reset the 3XP and are sure it's not going to
2392         * write all over memory, enable bus mastering, and save our
2393         * state for resuming after a suspend.
2394         */
2395        pci_set_master(pdev);
2396        pci_save_state(pdev);
2397
2398        typhoon_init_interface(tp);
2399        typhoon_init_rings(tp);
2400
2401        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2402                err_msg = "cannot boot 3XP sleep image";
2403                err = -EIO;
2404                goto error_out_reset;
2405        }
2406
2407        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2408        if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2409                err_msg = "cannot read MAC address";
2410                err = -EIO;
2411                goto error_out_reset;
2412        }
2413
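            /* The MAC address comes back as a 16-bit word in parm1 and a
             * 32-bit word in parm2 (little-endian in the descriptor);
             * converting to big-endian gives the address in network byte
             * order.
             */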
2414        *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2415        *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2416
2417        if(!is_valid_ether_addr(dev->dev_addr)) {
2418                err_msg = "Could not obtain valid ethernet address, aborting";
2419                goto error_out_reset;
2420        }
2421
2422        /* Read the Sleep Image version last, so the response is valid
2423         * later when we print out the version reported.
2424         */
2425        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2426        if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2427                err_msg = "Could not get Sleep Image version";
2428                goto error_out_reset;
2429        }
2430
2431        tp->capabilities = typhoon_card_info[card_id].capabilities;
2432        tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2433
2434        /* Typhoon 1.0 Sleep Images return one response descriptor to the
2435         * READ_VERSIONS command. Those versions are OK after waking up
2436         * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2437         * seem to need a little extra help to get started. Since we don't
2438         * know how to nudge it along, just kick it.
2439         */
2440        if(xp_resp[0].numDesc != 0)
2441                tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2442
2443        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2444                err_msg = "cannot put adapter to sleep";
2445                err = -EIO;
2446                goto error_out_reset;
2447        }
2448
2449        /* The chip-specific entries in the device structure. */
2450        dev->netdev_ops         = &typhoon_netdev_ops;
2451        netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2452        dev->watchdog_timeo     = TX_TIMEOUT;
2453
2454        SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2455
2456        /* We can handle scatter gather, up to 16 entries, and
2457         * we can do IP checksumming (only version 4, doh...)
2458         *
2459         * There's no way to turn off the RX VLAN offloading and stripping
2460         * on the current 3XP firmware -- it does not respect the offload
2461         * settings -- so we only allow the user to toggle the TX processing.
2462         */
2463        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2464                NETIF_F_HW_VLAN_TX;
2465        dev->features = dev->hw_features |
2466                NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
2467
2468        if(register_netdev(dev) < 0) {
2469                err_msg = "unable to register netdev";
2470                goto error_out_reset;
2471        }
2472
2473        pci_set_drvdata(pdev, dev);
2474
2475        netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2476                    typhoon_card_info[card_id].name,
2477                    use_mmio ? "MMIO" : "IO",
2478                    (unsigned long long)pci_resource_start(pdev, use_mmio),
2479                    dev->dev_addr);
2480
2481        /* xp_resp still contains the response to the READ_VERSIONS command.
2482         * For debugging, let the user know what version they have.
2483         */
2484        if(xp_resp[0].numDesc == 0) {
2485                /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2486                 * of version is Month/Day of build.
2487                 */
2488                u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2489                netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2490                            monthday >> 8, monthday & 0xff);
2491        } else if(xp_resp[0].numDesc == 2) {
2492                /* This is the Typhoon 1.1+ type Sleep Image
2493                 */
2494                u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2495                u8 *ver_string = (u8 *) &xp_resp[1];
2496                ver_string[25] = 0;
2497                netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2498                            sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2499                            sleep_ver & 0xfff, ver_string);
2500        } else {
2501                netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2502                            xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2503        }
2504
2505        return 0;
2506
2507error_out_reset:
2508        typhoon_reset(ioaddr, NoWait);
2509
2510error_out_dma:
2511        pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2512                            shared, shared_dma);
2513error_out_remap:
2514        pci_iounmap(pdev, ioaddr);
2515error_out_regions:
2516        pci_release_regions(pdev);
2517error_out_mwi:
2518        pci_clear_mwi(pdev);
2519error_out_disable:
2520        pci_disable_device(pdev);
2521error_out_dev:
2522        free_netdev(dev);
2523error_out:
2524        pr_err("%s: %s\n", pci_name(pdev), err_msg);
2525        return err;
2526}
2527
2528static void __devexit
2529typhoon_remove_one(struct pci_dev *pdev)
2530{
2531        struct net_device *dev = pci_get_drvdata(pdev);
2532        struct typhoon *tp = netdev_priv(dev);
2533
2534        unregister_netdev(dev);
2535        pci_set_power_state(pdev, PCI_D0);
2536        pci_restore_state(pdev);
2537        typhoon_reset(tp->ioaddr, NoWait);
2538        pci_iounmap(pdev, tp->ioaddr);
2539        pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2540                            tp->shared, tp->shared_dma);
2541        pci_release_regions(pdev);
2542        pci_clear_mwi(pdev);
2543        pci_disable_device(pdev);
2544        pci_set_drvdata(pdev, NULL);
2545        free_netdev(dev);
2546}
2547
2548static struct pci_driver typhoon_driver = {
2549        .name           = KBUILD_MODNAME,
2550        .id_table       = typhoon_pci_tbl,
2551        .probe          = typhoon_init_one,
2552        .remove         = __devexit_p(typhoon_remove_one),
2553#ifdef CONFIG_PM
2554        .suspend        = typhoon_suspend,
2555        .resume         = typhoon_resume,
2556#endif
2557};
2558
2559static int __init
2560typhoon_init(void)
2561{
2562        return pci_register_driver(&typhoon_driver);
2563}
2564
2565static void __exit
2566typhoon_cleanup(void)
2567{
2568        if (typhoon_fw)
2569                release_firmware(typhoon_fw);
2570        pci_unregister_driver(&typhoon_driver);
2571}
2572
2573module_init(typhoon_init);
2574module_exit(typhoon_cleanup);
2575