linux/drivers/net/3c527.c
   1/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
   2 *
   3 *      (c) Copyright 1998 Red Hat Software Inc
   4 *      Written by Alan Cox.
   5 *      Further debugging by Carl Drougge.
   6 *      Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
   7 *      Heavily modified by Richard Procter <rnp@paradise.net.nz>
   8 *
   9 *      Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
  10 *      (for the MCA stuff) written by Wim Dumon.
  11 *
  12 *      Thanks to 3Com for making this possible by providing me with the
  13 *      documentation.
  14 *
  15 *      This software may be used and distributed according to the terms
  16 *      of the GNU General Public License, incorporated herein by reference.
  17 *
  18 */
  19
  20#define DRV_NAME                "3c527"
  21#define DRV_VERSION             "0.7-SMP"
  22#define DRV_RELDATE             "2003/09/21"
  23
  24static const char *version =
  25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
  26
  27/**
  28 * DOC: Traps for the unwary
  29 *
  30 *      The diagram (Figure 1-1) and the POS summary disagree with the
  31 *      "Interrupt Level" section in the manual.
  32 *
  33 *      The manual contradicts itself when describing the minimum number
   34 *      of buffers in the 'configure lists' command.
  35 *      My card accepts a buffer config of 4/4.
  36 *
  37 *      Setting the SAV BP bit does not save bad packets, but
  38 *      only enables RX on-card stats collection.
  39 *
  40 *      The documentation in places seems to miss things. In actual fact
   41 *      I've always eventually found everything is documented; it just
  42 *      requires careful study.
  43 *
  44 * DOC: Theory Of Operation
  45 *
  46 *      The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
  47 *      amount of on board intelligence that housekeeps a somewhat dumber
  48 *      Intel NIC. For performance we want to keep the transmit queue deep
  49 *      as the card can transmit packets while fetching others from main
  50 *      memory by bus master DMA. Transmission and reception are driven by
  51 *      circular buffer queues.
  52 *
  53 *      The mailboxes can be used for controlling how the card traverses
   54 *      its buffer rings, but are used only for initial setup in this
  55 *      implementation.  The exec mailbox allows a variety of commands to
  56 *      be executed. Each command must complete before the next is
  57 *      executed. Primarily we use the exec mailbox for controlling the
  58 *      multicast lists.  We have to do a certain amount of interesting
  59 *      hoop jumping as the multicast list changes can occur in interrupt
  60 *      state when the card has an exec command pending. We defer such
  61 *      events until the command completion interrupt.
  62 *
  63 *      A copy break scheme (taken from 3c59x.c) is employed whereby
  64 *      received frames exceeding a configurable length are passed
   65 *      directly to the higher networking layers without incurring a copy,
  66 *      in what amounts to a time/space trade-off.
  67 *
  68 *      The card also keeps a large amount of statistical information
  69 *      on-board. In a perfect world, these could be used safely at no
  70 *      cost. However, lacking information to the contrary, processing
  71 *      them without races would involve so much extra complexity as to
  72 *      make it unworthwhile to do so. In the end, a hybrid SW/HW
  73 *      implementation was made necessary --- see mc32_update_stats().
  74 *
  75 * DOC: Notes
  76 *
  77 *      It should be possible to use two or more cards, but at this stage
  78 *      only by loading two copies of the same module.
  79 *
  80 *      The on-board 82586 NIC has trouble receiving multiple
  81 *      back-to-back frames and so is likely to drop packets from fast
  82 *      senders.
  83**/
  84
  85#include <linux/module.h>
  86
  87#include <linux/errno.h>
  88#include <linux/netdevice.h>
  89#include <linux/etherdevice.h>
  90#include <linux/if_ether.h>
  91#include <linux/init.h>
  92#include <linux/kernel.h>
  93#include <linux/types.h>
  94#include <linux/fcntl.h>
  95#include <linux/interrupt.h>
  96#include <linux/mca-legacy.h>
  97#include <linux/ioport.h>
  98#include <linux/in.h>
  99#include <linux/skbuff.h>
 100#include <linux/slab.h>
 101#include <linux/string.h>
 102#include <linux/wait.h>
 103#include <linux/ethtool.h>
 104#include <linux/completion.h>
 105#include <linux/bitops.h>
 106#include <linux/semaphore.h>
 107
 108#include <asm/uaccess.h>
 109#include <asm/system.h>
 110#include <asm/io.h>
 111#include <asm/dma.h>
 112
 113#include "3c527.h"
 114
 115MODULE_LICENSE("GPL");
 116
 117/*
 118 * The name of the card. Is used for messages and in the requests for
 119 * io regions, irqs and dma channels
 120 */
 121static const char* cardname = DRV_NAME;
 122
 123/* use 0 for production, 1 for verification, >2 for debug */
 124#ifndef NET_DEBUG
 125#define NET_DEBUG 2
 126#endif
 127
 128static unsigned int mc32_debug = NET_DEBUG;
 129
 130/* The number of low I/O ports used by the ethercard. */
 131#define MC32_IO_EXTENT  8
 132
 133/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
 134#define TX_RING_LEN     32       /* Typically the card supports 37  */
 135#define RX_RING_LEN     8        /*     "       "        "          */
 136
 137/* Copy break point, see above for details.
 138 * Setting to > 1512 effectively disables this feature. */
 139#define RX_COPYBREAK    200      /* Value from 3c59x.c */
 140
 141/* Issue the 82586 workaround command - this is for "busy lans", but
  142 * nowadays that basically means all lans - it has a performance (latency)
  143 * cost, but is best left set. */
 144static const int WORKAROUND_82586=1;
 145
 146/* Pointers to buffers and their on-card records */
 147struct mc32_ring_desc
 148{
 149        volatile struct skb_header *p;
 150        struct sk_buff *skb;
 151};
 152
 153/* Information that needs to be kept for each board. */
 154struct mc32_local
 155{
 156        int slot;
 157
 158        u32 base;
 159        volatile struct mc32_mailbox *rx_box;
 160        volatile struct mc32_mailbox *tx_box;
 161        volatile struct mc32_mailbox *exec_box;
 162        volatile struct mc32_stats *stats;    /* Start of on-card statistics */
 163        u16 tx_chain;           /* Transmit list start offset */
 164        u16 rx_chain;           /* Receive list start offset */
 165        u16 tx_len;             /* Transmit list count */
 166        u16 rx_len;             /* Receive list count */
 167
 168        u16 xceiver_desired_state; /* HALTED or RUNNING */
 169        u16 cmd_nonblocking;    /* Thread is uninterested in command result */
 170        u16 mc_reload_wait;     /* A multicast load request is pending */
 171        u32 mc_list_valid;      /* True when the mclist is set */
 172
 173        struct mc32_ring_desc tx_ring[TX_RING_LEN];     /* Host Transmit ring */
 174        struct mc32_ring_desc rx_ring[RX_RING_LEN];     /* Host Receive ring */
 175
 176        atomic_t tx_count;      /* buffers left */
 177        atomic_t tx_ring_head;  /* index to tx en-queue end */
 178        u16 tx_ring_tail;       /* index to tx de-queue end */
 179
 180        u16 rx_ring_tail;       /* index to rx de-queue end */
 181
 182        struct semaphore cmd_mutex;    /* Serialises issuing of execute commands */
 183        struct completion execution_cmd; /* Card has completed an execute command */
 184        struct completion xceiver_cmd;   /* Card has completed a tx or rx command */
 185};
 186
 187/* The station (ethernet) address prefix, used for a sanity check. */
 188#define SA_ADDR0 0x02
 189#define SA_ADDR1 0x60
 190#define SA_ADDR2 0xAC
 191
 192struct mca_adapters_t {
 193        unsigned int    id;
 194        char            *name;
 195};
 196
 197static const struct mca_adapters_t mc32_adapters[] = {
 198        { 0x0041, "3COM EtherLink MC/32" },
 199        { 0x8EF5, "IBM High Performance Lan Adapter" },
 200        { 0x0000, NULL }
 201};
 202
 203
  204/* Inline helpers for ring index manipulation (ring lengths are powers of two) */
  205static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }
  206static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }
  207
  208static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }
 209
 210
 211/* Index to functions, as function prototypes. */
 212static int      mc32_probe1(struct net_device *dev, int ioaddr);
 213static int      mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
 214static int      mc32_open(struct net_device *dev);
 215static void     mc32_timeout(struct net_device *dev);
 216static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
 217                                    struct net_device *dev);
 218static irqreturn_t mc32_interrupt(int irq, void *dev_id);
 219static int      mc32_close(struct net_device *dev);
 220static struct   net_device_stats *mc32_get_stats(struct net_device *dev);
 221static void     mc32_set_multicast_list(struct net_device *dev);
 222static void     mc32_reset_multicast_list(struct net_device *dev);
 223static const struct ethtool_ops netdev_ethtool_ops;
 224
 225static void cleanup_card(struct net_device *dev)
 226{
 227        struct mc32_local *lp = netdev_priv(dev);
 228        unsigned slot = lp->slot;
 229        mca_mark_as_unused(slot);
 230        mca_set_adapter_name(slot, NULL);
 231        free_irq(dev->irq, dev);
 232        release_region(dev->base_addr, MC32_IO_EXTENT);
 233}
 234
 235/**
 236 * mc32_probe   -       Search for supported boards
 237 * @unit: interface number to use
 238 *
 239 * Because MCA bus is a real bus and we can scan for cards we could do a
 240 * single scan for all boards here. Right now we use the passed in device
 241 * structure and scan for only one board. This needs fixing for modules
 242 * in particular.
 243 */
 244
 245struct net_device *__init mc32_probe(int unit)
 246{
 247        struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
 248        static int current_mca_slot = -1;
 249        int i;
 250        int err;
 251
 252        if (!dev)
 253                return ERR_PTR(-ENOMEM);
 254
 255        if (unit >= 0)
 256                sprintf(dev->name, "eth%d", unit);
 257
 258        /* Do not check any supplied i/o locations.
 259           POS registers usually don't fail :) */
 260
 261        /* MCA cards have POS registers.
 262           Autodetecting MCA cards is extremely simple.
 263           Just search for the card. */
 264
 265        for(i = 0; (mc32_adapters[i].name != NULL); i++) {
 266                current_mca_slot =
 267                        mca_find_unused_adapter(mc32_adapters[i].id, 0);
 268
 269                if(current_mca_slot != MCA_NOTFOUND) {
 270                        if(!mc32_probe1(dev, current_mca_slot))
 271                        {
 272                                mca_set_adapter_name(current_mca_slot,
 273                                                mc32_adapters[i].name);
 274                                mca_mark_as_used(current_mca_slot);
 275                                err = register_netdev(dev);
 276                                if (err) {
 277                                        cleanup_card(dev);
 278                                        free_netdev(dev);
 279                                        dev = ERR_PTR(err);
 280                                }
 281                                return dev;
 282                        }
 283
 284                }
 285        }
 286        free_netdev(dev);
 287        return ERR_PTR(-ENODEV);
 288}
 289
 290static const struct net_device_ops netdev_ops = {
 291        .ndo_open               = mc32_open,
 292        .ndo_stop               = mc32_close,
 293        .ndo_start_xmit         = mc32_send_packet,
 294        .ndo_get_stats          = mc32_get_stats,
 295        .ndo_set_multicast_list = mc32_set_multicast_list,
 296        .ndo_tx_timeout         = mc32_timeout,
 297        .ndo_change_mtu         = eth_change_mtu,
 298        .ndo_set_mac_address    = eth_mac_addr,
 299        .ndo_validate_addr      = eth_validate_addr,
 300};
 301
 302/**
 303 * mc32_probe1  -       Check a given slot for a board and test the card
 304 * @dev:  Device structure to fill in
 305 * @slot: The MCA bus slot being used by this card
 306 *
 307 * Decode the slot data and configure the card structures. Having done this we
 308 * can reset the card and configure it. The card does a full self test cycle
 309 * in firmware so we have to wait for it to return and post us either a
 310 * failure case or some addresses we use to find the board internals.
 311 */
 312
 313static int __init mc32_probe1(struct net_device *dev, int slot)
 314{
 315        static unsigned version_printed;
 316        int i, err;
 317        u8 POS;
 318        u32 base;
 319        struct mc32_local *lp = netdev_priv(dev);
 320        static u16 mca_io_bases[]={
 321                0x7280,0x7290,
 322                0x7680,0x7690,
 323                0x7A80,0x7A90,
 324                0x7E80,0x7E90
 325        };
 326        static u32 mca_mem_bases[]={
 327                0x00C0000,
 328                0x00C4000,
 329                0x00C8000,
 330                0x00CC000,
 331                0x00D0000,
 332                0x00D4000,
 333                0x00D8000,
 334                0x00DC000
 335        };
 336        static char *failures[]={
 337                "Processor instruction",
 338                "Processor data bus",
 339                "Processor data bus",
 340                "Processor data bus",
 341                "Adapter bus",
 342                "ROM checksum",
 343                "Base RAM",
 344                "Extended RAM",
 345                "82586 internal loopback",
 346                "82586 initialisation failure",
 347                "Adapter list configuration error"
 348        };
 349
 350        /* Time to play MCA games */
 351
 352        if (mc32_debug  &&  version_printed++ == 0)
 353                pr_debug("%s", version);
 354
 355        pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
 356
 357        POS = mca_read_stored_pos(slot, 2);
 358
 359        if(!(POS&1))
 360        {
 361                pr_cont("disabled.\n");
 362                return -ENODEV;
 363        }
 364
 365        /* Fill in the 'dev' fields. */
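             /* POS register 2: bit 0 is the card-enable bit (checked above); bits 1-3
                select the I/O base and bits 4-6 the shared memory base, decoded below. */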
 366        dev->base_addr = mca_io_bases[(POS>>1)&7];
 367        dev->mem_start = mca_mem_bases[(POS>>4)&7];
 368
 369        POS = mca_read_stored_pos(slot, 4);
 370        if(!(POS&1))
 371        {
 372                pr_cont("memory window disabled.\n");
 373                return -ENODEV;
 374        }
 375
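             /* POS register 5: bits 2-3 select the IRQ (9-12); bits 4-5 give the
                memory window size (16K, 32K or 48K; the value 3 is invalid). */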
 376        POS = mca_read_stored_pos(slot, 5);
 377
 378        i=(POS>>4)&3;
 379        if(i==3)
 380        {
 381                pr_cont("invalid memory window.\n");
 382                return -ENODEV;
 383        }
 384
 385        i*=16384;
 386        i+=16384;
 387
 388        dev->mem_end=dev->mem_start + i;
 389
 390        dev->irq = ((POS>>2)&3)+9;
 391
 392        if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
 393        {
 394                pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
 395                return -EBUSY;
 396        }
 397
 398        pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
 399                dev->base_addr, dev->irq, dev->mem_start, i/1024);
 400
 401
 402        /* We ought to set the cache line size here.. */
 403
 404
 405        /*
 406         *      Go PROM browsing
 407         */
 408
 409        /* Retrieve and print the ethernet address. */
 410        for (i = 0; i < 6; i++)
 411        {
 412                mca_write_pos(slot, 6, i+12);
 413                mca_write_pos(slot, 7, 0);
 414
 415                dev->dev_addr[i] = mca_read_pos(slot,3);
 416        }
 417
 418        pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
 419
 420        mca_write_pos(slot, 6, 0);
 421        mca_write_pos(slot, 7, 0);
 422
 423        POS = mca_read_stored_pos(slot, 4);
 424
 425        if(POS&2)
 426                pr_cont(": BNC port selected.\n");
 427        else
 428                pr_cont(": AUI port selected.\n");
 429
 430        POS=inb(dev->base_addr+HOST_CTRL);
 431        POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
 432        POS&=~HOST_CTRL_INTE;
 433        outb(POS, dev->base_addr+HOST_CTRL);
 434        /* Reset adapter */
 435        udelay(100);
 436        /* Reset off */
 437        POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
 438        outb(POS, dev->base_addr+HOST_CTRL);
 439
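             /* Give the adapter a moment to settle after releasing reset */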
 440        udelay(300);
 441
 442        /*
 443         *      Grab the IRQ
 444         */
 445
 446        err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
 447        if (err) {
 448                release_region(dev->base_addr, MC32_IO_EXTENT);
 449                pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
 450                goto err_exit_ports;
 451        }
 452
 453        memset(lp, 0, sizeof(struct mc32_local));
 454        lp->slot = slot;
 455
 456        i=0;
 457
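             /*
              *      Wait (up to about a second) for the on-board firmware to
              *      finish its self-test and post a result byte: zero means
              *      success, a non-zero code maps to the failures[] table above.
              */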
 458        base = inb(dev->base_addr);
 459
 460        while(base == 0xFF)
 461        {
 462                i++;
 463                if(i == 1000)
 464                {
 465                        pr_err("%s: failed to boot adapter.\n", dev->name);
 466                        err = -ENODEV;
 467                        goto err_exit_irq;
 468                }
 469                udelay(1000);
 470                if(inb(dev->base_addr+2)&(1<<5))
 471                        base = inb(dev->base_addr);
 472        }
 473
 474        if(base>0)
 475        {
 476                if(base < 0x0C)
 477                        pr_err("%s: %s%s.\n", dev->name, failures[base-1],
 478                                base<0x0A?" test failure":"");
 479                else
 480                        pr_err("%s: unknown failure %d.\n", dev->name, base);
 481                err = -ENODEV;
 482                goto err_exit_irq;
 483        }
 484
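             /*
              *      Read the 32-bit offset of the exec mailbox one byte at a
              *      time (least significant byte first), polling bit 5 of the
              *      port at base+2 before each read.
              */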
 485        base=0;
 486        for(i=0;i<4;i++)
 487        {
 488                int n=0;
 489
 490                while(!(inb(dev->base_addr+2)&(1<<5)))
 491                {
 492                        n++;
 493                        udelay(50);
 494                        if(n>100)
 495                        {
 496                                pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
 497                                err = -ENODEV;
 498                                goto err_exit_irq;
 499                        }
 500                }
 501
 502                base|=(inb(dev->base_addr)<<(8*i));
 503        }
 504
 505        lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
 506
 507        base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
 508
 509        lp->base = dev->mem_start+base;
 510
 511        lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
 512        lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
 513
 514        lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
 515
 516        /*
 517         *      Descriptor chains (card relative)
 518         */
 519
 520        lp->tx_chain            = lp->exec_box->data[8];   /* Transmit list start offset */
 521        lp->rx_chain            = lp->exec_box->data[10];  /* Receive list start offset */
 522        lp->tx_len              = lp->exec_box->data[9];   /* Transmit list count */
 523        lp->rx_len              = lp->exec_box->data[11];  /* Receive list count */
 524
 525        init_MUTEX_LOCKED(&lp->cmd_mutex);
 526        init_completion(&lp->execution_cmd);
 527        init_completion(&lp->xceiver_cmd);
 528
 529        pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
 530                dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
 531
 532        dev->netdev_ops         = &netdev_ops;
 533        dev->watchdog_timeo     = HZ*5; /* Board does all the work */
 534        dev->ethtool_ops        = &netdev_ethtool_ops;
 535
 536        return 0;
 537
 538err_exit_irq:
 539        free_irq(dev->irq, dev);
 540err_exit_ports:
 541        release_region(dev->base_addr, MC32_IO_EXTENT);
 542        return err;
 543}
 544
 545
 546/**
 547 *      mc32_ready_poll         -       wait until we can feed it a command
 548 *      @dev:   The device to wait for
 549 *
 550 *      Wait until the card becomes ready to accept a command via the
 551 *      command register. This tells us nothing about the completion
 552 *      status of any pending commands and takes very little time at all.
 553 */
 554
 555static inline void mc32_ready_poll(struct net_device *dev)
 556{
 557        int ioaddr = dev->base_addr;
 558        while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
 559}
 560
 561
 562/**
 563 *      mc32_command_nowait     -       send a command non blocking
 564 *      @dev: The 3c527 to issue the command to
 565 *      @cmd: The command word to write to the mailbox
 566 *      @data: A data block if the command expects one
 567 *      @len: Length of the data block
 568 *
 569 *      Send a command from interrupt state. If there is a command
 570 *      currently being executed then we return an error of -1. It
 571 *      simply isn't viable to wait around as commands may be
 572 *      slow. This can theoretically be starved on SMP, but it's hard
 573 *      to see a realistic situation.  We do not wait for the command
 574 *      to complete --- we rely on the interrupt handler to tidy up
 575 *      after us.
 576 */
 577
 578static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
 579{
 580        struct mc32_local *lp = netdev_priv(dev);
 581        int ioaddr = dev->base_addr;
 582        int ret = -1;
 583
 584        if (down_trylock(&lp->cmd_mutex) == 0)
 585        {
 586                lp->cmd_nonblocking=1;
 587                lp->exec_box->mbox=0;
 588                lp->exec_box->mbox=cmd;
 589                memcpy((void *)lp->exec_box->data, data, len);
 590                barrier();      /* the memcpy forgot the volatile so be sure */
 591
 592                /* Send the command */
 593                mc32_ready_poll(dev);
 594                outb(1<<6, ioaddr+HOST_CMD);
 595
 596                ret = 0;
 597
 598                /* Interrupt handler will signal mutex on completion */
 599        }
 600
 601        return ret;
 602}
 603
 604
 605/**
 606 *      mc32_command    -       send a command and sleep until completion
 607 *      @dev: The 3c527 card to issue the command to
 608 *      @cmd: The command word to write to the mailbox
 609 *      @data: A data block if the command expects one
 610 *      @len: Length of the data block
 611 *
 612 *      Sends exec commands in a user context. This permits us to wait around
 613 *      for the replies and also to wait for the command buffer to complete
 614 *      from a previous command before we execute our command. After our
 615 *      command completes we will attempt any pending multicast reload
 616 *      we blocked off by hogging the exec buffer.
 617 *
  618 *      You feed the card a command, you wait, it interrupts and you get a
 619 *      reply. All well and good. The complication arises because you use
 620 *      commands for filter list changes which come in at bh level from things
 621 *      like IPV6 group stuff.
 622 */
 623
 624static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
 625{
 626        struct mc32_local *lp = netdev_priv(dev);
 627        int ioaddr = dev->base_addr;
 628        int ret = 0;
 629
 630        down(&lp->cmd_mutex);
 631
 632        /*
 633         *     My Turn
 634         */
 635
 636        lp->cmd_nonblocking=0;
 637        lp->exec_box->mbox=0;
 638        lp->exec_box->mbox=cmd;
 639        memcpy((void *)lp->exec_box->data, data, len);
 640        barrier();      /* the memcpy forgot the volatile so be sure */
 641
 642        mc32_ready_poll(dev);
 643        outb(1<<6, ioaddr+HOST_CMD);
 644
 645        wait_for_completion(&lp->execution_cmd);
 646
 647        if(lp->exec_box->mbox&(1<<13))
 648                ret = -1;
 649
 650        up(&lp->cmd_mutex);
 651
 652        /*
 653         *      A multicast set got blocked - try it now
 654         */
 655
 656        if(lp->mc_reload_wait)
 657        {
 658                mc32_reset_multicast_list(dev);
 659        }
 660
 661        return ret;
 662}
 663
 664
 665/**
 666 *      mc32_start_transceiver  -       tell board to restart tx/rx
 667 *      @dev: The 3c527 card to issue the command to
 668 *
 669 *      This may be called from the interrupt state, where it is used
 670 *      to restart the rx ring if the card runs out of rx buffers.
 671 *
 672 *      We must first check if it's ok to (re)start the transceiver. See
 673 *      mc32_close for details.
 674 */
 675
 676static void mc32_start_transceiver(struct net_device *dev) {
 677
 678        struct mc32_local *lp = netdev_priv(dev);
 679        int ioaddr = dev->base_addr;
 680
 681        /* Ignore RX overflow on device closure */
 682        if (lp->xceiver_desired_state==HALTED)
 683                return;
 684
 685        /* Give the card the offset to the post-EOL-bit RX descriptor */
 686        mc32_ready_poll(dev);
 687        lp->rx_box->mbox=0;
 688        lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
 689        outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
 690
 691        mc32_ready_poll(dev);
 692        lp->tx_box->mbox=0;
 693        outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */
 694
 695        /* We are not interrupted on start completion */
 696}
 697
 698
 699/**
 700 *      mc32_halt_transceiver   -       tell board to stop tx/rx
 701 *      @dev: The 3c527 card to issue the command to
 702 *
 703 *      We issue the commands to halt the card's transceiver. In fact,
 704 *      after some experimenting we now simply tell the card to
  705 *      suspend. Issuing aborts occasionally caused odd things to happen.
 706 *
 707 *      We then sleep until the card has notified us that both rx and
 708 *      tx have been suspended.
 709 */
 710
 711static void mc32_halt_transceiver(struct net_device *dev)
 712{
 713        struct mc32_local *lp = netdev_priv(dev);
 714        int ioaddr = dev->base_addr;
 715
 716        mc32_ready_poll(dev);
 717        lp->rx_box->mbox=0;
 718        outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
 719        wait_for_completion(&lp->xceiver_cmd);
 720
 721        mc32_ready_poll(dev);
 722        lp->tx_box->mbox=0;
 723        outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
 724        wait_for_completion(&lp->xceiver_cmd);
 725}
 726
 727
 728/**
 729 *      mc32_load_rx_ring       -       load the ring of receive buffers
 730 *      @dev: 3c527 to build the ring for
 731 *
  732 *      This initialises the on-card and driver data structures to
 733 *      the point where mc32_start_transceiver() can be called.
 734 *
 735 *      The card sets up the receive ring for us. We are required to use the
 736 *      ring it provides, although the size of the ring is configurable.
 737 *
 738 *      We allocate an sk_buff for each ring entry in turn and
  739 *      initialise its house-keeping info. At the same time, we read
 740 *      each 'next' pointer in our rx_ring array. This reduces slow
 741 *      shared-memory reads and makes it easy to access predecessor
 742 *      descriptors.
 743 *
 744 *      We then set the end-of-list bit for the last entry so that the
 745 *      card will know when it has run out of buffers.
 746 */
 747
 748static int mc32_load_rx_ring(struct net_device *dev)
 749{
 750        struct mc32_local *lp = netdev_priv(dev);
 751        int i;
 752        u16 rx_base;
 753        volatile struct skb_header *p;
 754
 755        rx_base=lp->rx_chain;
 756
 757        for(i=0; i<RX_RING_LEN; i++) {
 758                lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
 759                if (lp->rx_ring[i].skb==NULL) {
  760                        for (;i>=0;i--) {
  761                                kfree_skb(lp->rx_ring[i].skb);
                                     lp->rx_ring[i].skb=NULL; /* flush_rx_ring() expects NULL or a valid skb */
                             }
  762                        return -ENOBUFS;
 763                }
 764                skb_reserve(lp->rx_ring[i].skb, 18);
 765
 766                p=isa_bus_to_virt(lp->base+rx_base);
 767
 768                p->control=0;
 769                p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
 770                p->status=0;
 771                p->length=1532;
 772
 773                lp->rx_ring[i].p=p;
 774                rx_base=p->next;
 775        }
 776
 777        lp->rx_ring[i-1].p->control |= CONTROL_EOL;
 778
 779        lp->rx_ring_tail=0;
 780
 781        return 0;
 782}
 783
 784
 785/**
 786 *      mc32_flush_rx_ring      -       free the ring of receive buffers
 787 *      @lp: Local data of 3c527 to flush the rx ring of
 788 *
 789 *      Free the buffer for each ring slot. This may be called
 790 *      before mc32_load_rx_ring(), eg. on error in mc32_open().
 791 *      Requires rx skb pointers to point to a valid skb, or NULL.
 792 */
 793
 794static void mc32_flush_rx_ring(struct net_device *dev)
 795{
 796        struct mc32_local *lp = netdev_priv(dev);
 797        int i;
 798
 799        for(i=0; i < RX_RING_LEN; i++)
 800        {
 801                if (lp->rx_ring[i].skb) {
 802                        dev_kfree_skb(lp->rx_ring[i].skb);
 803                        lp->rx_ring[i].skb = NULL;
 804                }
 805                lp->rx_ring[i].p=NULL;
 806        }
 807}
 808
 809
 810/**
 811 *      mc32_load_tx_ring       -       load transmit ring
 812 *      @dev: The 3c527 card to issue the command to
 813 *
 814 *      This sets up the host transmit data-structures.
 815 *
  816 *      First, we obtain from the card its current position in the tx
 817 *      ring, so that we will know where to begin transmitting
 818 *      packets.
 819 *
 820 *      Then, we read the 'next' pointers from the on-card tx ring into
 821 *      our tx_ring array to reduce slow shared-mem reads. Finally, we
  822 *      initialise the tx house-keeping variables.
 823 *
 824 */
 825
 826static void mc32_load_tx_ring(struct net_device *dev)
 827{
 828        struct mc32_local *lp = netdev_priv(dev);
 829        volatile struct skb_header *p;
 830        int i;
 831        u16 tx_base;
 832
 833        tx_base=lp->tx_box->data[0];
 834
 835        for(i=0 ; i<TX_RING_LEN ; i++)
 836        {
 837                p=isa_bus_to_virt(lp->base+tx_base);
 838                lp->tx_ring[i].p=p;
 839                lp->tx_ring[i].skb=NULL;
 840
 841                tx_base=p->next;
 842        }
 843
 844        /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
 845        /* see mc32_tx_ring */
 846
 847        atomic_set(&lp->tx_count, TX_RING_LEN-1);
 848        atomic_set(&lp->tx_ring_head, 0);
 849        lp->tx_ring_tail=0;
 850}
 851
 852
 853/**
 854 *      mc32_flush_tx_ring      -       free transmit ring
 855 *      @lp: Local data of 3c527 to flush the tx ring of
 856 *
  857 *      If the ring is non-empty, zip over it, freeing any
  858 *      allocated sk_buffs.  The tx ring house-keeping variables are
  859 *      then reset. Requires tx skb pointers to point to a valid skb,
 860 *      or NULL.
 861 */
 862
 863static void mc32_flush_tx_ring(struct net_device *dev)
 864{
 865        struct mc32_local *lp = netdev_priv(dev);
 866        int i;
 867
 868        for (i=0; i < TX_RING_LEN; i++)
 869        {
 870                if (lp->tx_ring[i].skb)
 871                {
 872                        dev_kfree_skb(lp->tx_ring[i].skb);
 873                        lp->tx_ring[i].skb = NULL;
 874                }
 875        }
 876
 877        atomic_set(&lp->tx_count, 0);
 878        atomic_set(&lp->tx_ring_head, 0);
 879        lp->tx_ring_tail=0;
 880}
 881
 882
 883/**
 884 *      mc32_open       -       handle 'up' of card
 885 *      @dev: device to open
 886 *
 887 *      The user is trying to bring the card into ready state. This requires
 888 *      a brief dialogue with the card. Firstly we enable interrupts and then
 889 *      'indications'. Without these enabled the card doesn't bother telling
 890 *      us what it has done. This had me puzzled for a week.
 891 *
 892 *      We configure the number of card descriptors, then load the network
 893 *      address and multicast filters. Turn on the workaround mode. This
 894 *      works around a bug in the 82586 - it asks the firmware to do
 895 *      so. It has a performance (latency) hit but is needed on busy
 896 *      [read most] lans. We load the ring with buffers then we kick it
 897 *      all off.
 898 */
 899
 900static int mc32_open(struct net_device *dev)
 901{
 902        int ioaddr = dev->base_addr;
 903        struct mc32_local *lp = netdev_priv(dev);
 904        u8 one=1;
 905        u8 regs;
 906        u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
 907
 908        /*
 909         *      Interrupts enabled
 910         */
 911
 912        regs=inb(ioaddr+HOST_CTRL);
 913        regs|=HOST_CTRL_INTE;
 914        outb(regs, ioaddr+HOST_CTRL);
 915
 916        /*
 917         *      Allow ourselves to issue commands
 918         */
 919
 920        up(&lp->cmd_mutex);
 921
 922
 923        /*
 924         *      Send the indications on command
 925         */
 926
 927        mc32_command(dev, 4, &one, 2);
 928
 929        /*
 930         *      Poke it to make sure it's really dead.
 931         */
 932
 933        mc32_halt_transceiver(dev);
 934        mc32_flush_tx_ring(dev);
 935
 936        /*
 937         *      Ask card to set up on-card descriptors to our spec
 938         */
 939
 940        if(mc32_command(dev, 8, descnumbuffs, 4)) {
 941                pr_info("%s: %s rejected our buffer configuration!\n",
 942                       dev->name, cardname);
 943                mc32_close(dev);
 944                return -ENOBUFS;
 945        }
 946
 947        /* Report new configuration */
 948        mc32_command(dev, 6, NULL, 0);
 949
 950        lp->tx_chain            = lp->exec_box->data[8];   /* Transmit list start offset */
 951        lp->rx_chain            = lp->exec_box->data[10];  /* Receive list start offset */
 952        lp->tx_len              = lp->exec_box->data[9];   /* Transmit list count */
 953        lp->rx_len              = lp->exec_box->data[11];  /* Receive list count */
 954
 955        /* Set Network Address */
 956        mc32_command(dev, 1, dev->dev_addr, 6);
 957
 958        /* Set the filters */
 959        mc32_set_multicast_list(dev);
 960
 961        if (WORKAROUND_82586) {
 962                u16 zero_word=0;
 963                mc32_command(dev, 0x0D, &zero_word, 2);   /* 82586 bug workaround on  */
 964        }
 965
 966        mc32_load_tx_ring(dev);
 967
 968        if(mc32_load_rx_ring(dev))
 969        {
 970                mc32_close(dev);
 971                return -ENOBUFS;
 972        }
 973
 974        lp->xceiver_desired_state = RUNNING;
 975
 976        /* And finally, set the ball rolling... */
 977        mc32_start_transceiver(dev);
 978
 979        netif_start_queue(dev);
 980
 981        return 0;
 982}
 983
 984
 985/**
 986 *      mc32_timeout    -       handle a timeout from the network layer
 987 *      @dev: 3c527 that timed out
 988 *
 989 *      Handle a timeout on transmit from the 3c527. This normally means
 990 *      bad things as the hardware handles cable timeouts and mess for
 991 *      us.
 992 *
 993 */
 994
 995static void mc32_timeout(struct net_device *dev)
 996{
 997        pr_warning("%s: transmit timed out?\n", dev->name);
 998        /* Try to restart the adaptor. */
 999        netif_wake_queue(dev);
1000}
1001
1002
1003/**
1004 *      mc32_send_packet        -       queue a frame for transmit
1005 *      @skb: buffer to transmit
1006 *      @dev: 3c527 to send it out of
1007 *
1008 *      Transmit a buffer. This normally means throwing the buffer onto
1009 *      the transmit queue as the queue is quite large. If the queue is
1010 *      full then we set tx_busy and return. Once the interrupt handler
1011 *      gets messages telling it to reclaim transmit queue entries, we will
1012 *      clear tx_busy and the kernel will start calling this again.
1013 *
1014 *      We do not disable interrupts or acquire any locks; this can
1015 *      run concurrently with mc32_tx_ring(), and the function itself
1016 *      is serialised at a higher layer. However, similarly for the
1017 *      card itself, we must ensure that we update tx_ring_head only
1018 *      after we've established a valid packet on the tx ring (and
1019 *      before we let the card "see" it, to prevent it racing with the
1020 *      irq handler).
1021 *
1022 */
1023
1024static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
1025                                    struct net_device *dev)
1026{
1027        struct mc32_local *lp = netdev_priv(dev);
1028        u32 head = atomic_read(&lp->tx_ring_head);
1029
1030        volatile struct skb_header *p, *np;
1031
1032        netif_stop_queue(dev);
1033
1034        if(atomic_read(&lp->tx_count)==0) {
1035                return NETDEV_TX_BUSY;
1036        }
1037
1038        if (skb_padto(skb, ETH_ZLEN)) {
1039                netif_wake_queue(dev);
1040                return NETDEV_TX_OK;
1041        }
1042
1043        atomic_dec(&lp->tx_count);
1044
1045        /* P is the last sending/sent buffer as a pointer */
1046        p=lp->tx_ring[head].p;
1047
1048        head = next_tx(head);
1049
1050        /* NP is the buffer we will be loading */
1051        np=lp->tx_ring[head].p;
1052
1053        /* We will need this to flush the buffer out */
1054        lp->tx_ring[head].skb=skb;
1055
1056        np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
1057        np->data        = isa_virt_to_bus(skb->data);
1058        np->status      = 0;
1059        np->control     = CONTROL_EOP | CONTROL_EOL;
1060        wmb();
1061
1062        /*
1063         * The new frame has been setup; we can now
1064         * let the interrupt handler and card "see" it
1065         */
1066
1067        atomic_set(&lp->tx_ring_head, head);
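             /* Clearing EOL on the previous descriptor lets the card advance onto the new frame */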
1068        p->control     &= ~CONTROL_EOL;
1069
1070        netif_wake_queue(dev);
1071        return NETDEV_TX_OK;
1072}
1073
1074
1075/**
1076 *      mc32_update_stats       -       pull off the on board statistics
1077 *      @dev: 3c527 to service
1078 *
1079 *
1080 *      Query and reset the on-card stats. There's the small possibility
1081 *      of a race here, which would result in an underestimation of
1082 *      actual errors. As such, we'd prefer to keep all our stats
1083 *      collection in software. As a rule, we do. However it can't be
1084 *      used for rx errors and collisions as, by default, the card discards
1085 *      bad rx packets.
1086 *
1087 *      Setting the SAV BP in the rx filter command supposedly
1088 *      stops this behaviour. However, testing shows that it only seems to
1089 *      enable the collation of on-card rx statistics --- the driver
1090 *      never sees an RX descriptor with an error status set.
1091 *
1092 */
1093
1094static void mc32_update_stats(struct net_device *dev)
1095{
1096        struct mc32_local *lp = netdev_priv(dev);
1097        volatile struct mc32_stats *st = lp->stats;
1098
1099        u32 rx_errors=0;
1100
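             /* Each pair of lines below folds an on-card counter into the kernel
                total, accumulates it into rx_errors, and then clears the on-card
                counter so it is not counted twice. */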
1101        rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
1102                                                   st->rx_crc_errors=0;
1103        rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
1104                                                   st->rx_overrun_errors=0;
1105        rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1106                                                   st->rx_alignment_errors=0;
1107        rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1108                                                   st->rx_tooshort_errors=0;
1109        rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
1110                                                   st->rx_outofresource_errors=0;
1111        dev->stats.rx_errors=rx_errors;
1112
1113        /* Number of packets which saw one collision */
1114        dev->stats.collisions+=st->dataC[10];
1115        st->dataC[10]=0;
1116
1117        /* Number of packets which saw 2--15 collisions */
1118        dev->stats.collisions+=st->dataC[11];
1119        st->dataC[11]=0;
1120}
1121
1122
1123/**
1124 *      mc32_rx_ring    -       process the receive ring
1125 *      @dev: 3c527 that needs its receive ring processing
1126 *
1127 *
1128 *      We have received one or more indications from the card that a
1129 *      receive has completed. The buffer ring thus contains dirty
1130 *      entries. We walk the ring by iterating over the circular rx_ring
1131 *      array, starting at the next dirty buffer (which happens to be the
1132 *      one we finished up at last time around).
1133 *
1134 *      For each completed packet, we will either copy it and pass it up
1135 *      the stack or, if the packet is near MTU sized, we allocate
1136 *      another buffer and flip the old one up the stack.
1137 *
1138 *      We must succeed in keeping a buffer on the ring. If necessary we
1139 *      will toss a received packet rather than lose a ring entry. Once
1140 *      the first uncompleted descriptor is found, we move the
1141 *      End-Of-List bit to include the buffers just processed.
1142 *
1143 */
1144
1145static void mc32_rx_ring(struct net_device *dev)
1146{
1147        struct mc32_local *lp = netdev_priv(dev);
1148        volatile struct skb_header *p;
1149        u16 rx_ring_tail;
1150        u16 rx_old_tail;
1151        int x=0;
1152
1153        rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
1154
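             /* x bounds the number of descriptors we will process in one call */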
1155        do
1156        {
1157                p=lp->rx_ring[rx_ring_tail].p;
1158
1159                if(!(p->status & (1<<7))) { /* Not COMPLETED */
1160                        break;
1161                }
1162                if(p->status & (1<<6)) /* COMPLETED_OK */
1163                {
1164
1165                        u16 length=p->length;
1166                        struct sk_buff *skb;
1167                        struct sk_buff *newskb;
1168
1169                        /* Try to save time by avoiding a copy on big frames */
1170
1171                        if ((length > RX_COPYBREAK)
1172                            && ((newskb=dev_alloc_skb(1532)) != NULL))
1173                        {
1174                                skb=lp->rx_ring[rx_ring_tail].skb;
1175                                skb_put(skb, length);
1176
1177                                skb_reserve(newskb,18);
1178                                lp->rx_ring[rx_ring_tail].skb=newskb;
1179                                p->data=isa_virt_to_bus(newskb->data);
1180                        }
1181                        else
1182                        {
1183                                skb=dev_alloc_skb(length+2);
1184
1185                                if(skb==NULL) {
1186                                        dev->stats.rx_dropped++;
1187                                        goto dropped;
1188                                }
1189
1190                                skb_reserve(skb,2);
1191                                memcpy(skb_put(skb, length),
1192                                       lp->rx_ring[rx_ring_tail].skb->data, length);
1193                        }
1194
1195                        skb->protocol=eth_type_trans(skb,dev);
1196                        dev->stats.rx_packets++;
1197                        dev->stats.rx_bytes += length;
1198                        netif_rx(skb);
1199                }
1200
1201        dropped:
1202                p->length = 1532;
1203                p->status = 0;
1204
1205                rx_ring_tail=next_rx(rx_ring_tail);
1206        }
1207        while(x++<48);
1208
1209        /* If there was actually a frame to be processed, place the EOL bit */
1210        /* at the descriptor prior to the one to be filled next */
1211
1212        if (rx_ring_tail != rx_old_tail)
1213        {
1214                lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
1215                lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;
1216
1217                lp->rx_ring_tail=rx_ring_tail;
1218        }
1219}
1220
1221
1222/**
1223 *      mc32_tx_ring    -       process completed transmits
1224 *      @dev: 3c527 that needs its transmit ring processing
1225 *
1226 *
1227 *      This operates in a similar fashion to mc32_rx_ring. We iterate
1228 *      over the transmit ring. For each descriptor which has been
1229 *      processed by the card, we free its associated buffer and note
1230 *      any errors. This continues until the transmit ring is emptied
1231 *      or we reach a descriptor that hasn't yet been processed by the
1232 *      card.
1233 *
1234 */
1235
1236static void mc32_tx_ring(struct net_device *dev)
1237{
1238        struct mc32_local *lp = netdev_priv(dev);
1239        volatile struct skb_header *np;
1240
1241        /*
1242         * We rely on head==tail to mean 'queue empty'.
1243         * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
1244         * tx_ring_head wrapping to tail and confusing a 'queue empty'
1245         * condition with 'queue full'
1246         */
1247
1248        while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
1249        {
1250                u16 t;
1251
1252                t=next_tx(lp->tx_ring_tail);
1253                np=lp->tx_ring[t].p;
1254
1255                if(!(np->status & (1<<7)))
1256                {
1257                        /* Not COMPLETED */
1258                        break;
1259                }
1260                dev->stats.tx_packets++;
1261                if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1262                {
1263                        dev->stats.tx_errors++;
1264
1265                        switch(np->status&0x0F)
1266                        {
1267                                case 1:
1268                                        dev->stats.tx_aborted_errors++;
1269                                        break; /* Max collisions */
1270                                case 2:
1271                                        dev->stats.tx_fifo_errors++;
1272                                        break;
1273                                case 3:
1274                                        dev->stats.tx_carrier_errors++;
1275                                        break;
1276                                case 4:
1277                                        dev->stats.tx_window_errors++;
1278                                        break;  /* CTS Lost */
1279                                case 5:
1280                                        dev->stats.tx_aborted_errors++;
1281                                        break; /* Transmit timeout */
1282                        }
1283                }
1284                /* Packets are sent in order - this is
1285                    basically a FIFO queue of buffers matching
1286                    the card ring */
1287                dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
1288                dev_kfree_skb_irq(lp->tx_ring[t].skb);
1289                lp->tx_ring[t].skb=NULL;
1290                atomic_inc(&lp->tx_count);
1291                netif_wake_queue(dev);
1292
1293                lp->tx_ring_tail=t;
1294        }
1295
1296}
1297
1298
1299/**
1300 *      mc32_interrupt          -       handle an interrupt from a 3c527
1301 *      @irq: Interrupt number
1302 *      @dev_id: 3c527 that requires servicing
1303 *      @regs: Registers (unused)
1304 *
1305 *
1306 *      An interrupt is raised whenever the 3c527 writes to the command
1307 *      register. This register contains the message it wishes to send us
1308 *      packed into a single byte field. We keep reading status entries
1309 *      until we have processed all the control items, but simply count
1310 *      transmit and receive reports. When all reports are in we empty the
1311 *      transceiver rings as appropriate. This saves the overhead of
1312 *      multiple command requests.
1313 *
1314 *      Because MCA is level-triggered, we shouldn't miss indications.
1315 *      Therefore, we needn't ask the card to suspend interrupts within
1316 *      this handler. The card receives an implicit acknowledgment of the
1317 *      current interrupt when we read the command register.
1318 *
1319 */
1320
1321static irqreturn_t mc32_interrupt(int irq, void *dev_id)
1322{
1323        struct net_device *dev = dev_id;
1324        struct mc32_local *lp;
1325        int ioaddr, status, boguscount = 0;
1326        int rx_event = 0;
1327        int tx_event = 0;
1328
1329        ioaddr = dev->base_addr;
1330        lp = netdev_priv(dev);
1331
 1332        /* See what's cooking */
1333
1334        while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
1335        {
1336                status=inb(ioaddr+HOST_CMD);
1337
1338                pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
1339                        (status&7), (status>>3)&7, (status>>6)&1,
1340                        (status>>7)&1, boguscount);
1341
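                     /*
                      *      The status byte packs several fields: bits 0-2 are the
                      *      transmit completion code, bits 3-5 the receive completion
                      *      code, bit 6 flags exec command completion and bit 7 flags
                      *      a statistics counter that is about to overflow.
                      */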
1342                switch(status&7)
1343                {
1344                        case 0:
1345                                break;
1346                        case 6: /* TX fail */
1347                        case 2: /* TX ok */
1348                                tx_event = 1;
1349                                break;
1350                        case 3: /* Halt */
1351                        case 4: /* Abort */
1352                                complete(&lp->xceiver_cmd);
1353                                break;
1354                        default:
1355                                pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
1356                }
1357                status>>=3;
1358                switch(status&7)
1359                {
1360                        case 0:
1361                                break;
1362                        case 2: /* RX */
1363                                rx_event=1;
1364                                break;
1365                        case 3: /* Halt */
1366                        case 4: /* Abort */
1367                                complete(&lp->xceiver_cmd);
1368                                break;
1369                        case 6:
1370                                /* Out of RX buffers stat */
1371                                /* Must restart rx */
1372                                dev->stats.rx_dropped++;
1373                                mc32_rx_ring(dev);
1374                                mc32_start_transceiver(dev);
1375                                break;
1376                        default:
1377                                pr_notice("%s: strange rx ack %d\n",
1378                                        dev->name, status&7);
1379                }
1380                status>>=3;
1381                if(status&1)
1382                {
1383                        /*
1384                         * No thread is waiting: we need to tidy
1385                         * up ourself.
1386                         */
1387
1388                        if (lp->cmd_nonblocking) {
1389                                up(&lp->cmd_mutex);
1390                                if (lp->mc_reload_wait)
1391                                        mc32_reset_multicast_list(dev);
1392                        }
1393                        else complete(&lp->execution_cmd);
1394                }
1395                if(status&2)
1396                {
1397                        /*
1398                         *      We get interrupted once per
1399                         *      counter that is about to overflow.
1400                         */
1401
1402                        mc32_update_stats(dev);
1403                }
1404        }
1405
1406
1407        /*
1408         *      Process the transmit and receive rings
1409         */
1410
1411        if(tx_event)
1412                mc32_tx_ring(dev);
1413
1414        if(rx_event)
1415                mc32_rx_ring(dev);
1416
1417        return IRQ_HANDLED;
1418}
1419
1420
1421/**
1422 *      mc32_close      -       user configuring the 3c527 down
1423 *      @dev: 3c527 card to shut down
1424 *
1425 *      The 3c527 is a bus mastering device. We must be careful how we
 1426 *      shut it down. It may also be using a shared interrupt, so we have
 1427 *      to be sure to silence it properly.
1428 *
1429 *      We indicate that the card is closing to the rest of the
1430 *      driver.  Otherwise, it is possible that the card may run out
1431 *      of receive buffers and restart the transceiver while we're
1432 *      trying to close it.
1433 *
1434 *      We abort any receive and transmits going on and then wait until
1435 *      any pending exec commands have completed in other code threads.
 1436 *      In theory we can't get here while that is true; in practice I am
 1437 *      paranoid.
1438 *
1439 *      We turn off the interrupt enable for the board to be sure it can't
 1440 *      interfere with other devices.
1441 */
1442
1443static int mc32_close(struct net_device *dev)
1444{
1445        struct mc32_local *lp = netdev_priv(dev);
1446        int ioaddr = dev->base_addr;
1447
1448        u8 regs;
1449        u16 one=1;
1450
1451        lp->xceiver_desired_state = HALTED;
1452        netif_stop_queue(dev);
1453
1454        /*
1455         *      Send the indications on command (handy debug check)
1456         */
1457
1458        mc32_command(dev, 4, &one, 2);
1459
1460        /* Shut down the transceiver */
1461
1462        mc32_halt_transceiver(dev);
1463
1464        /* Ensure we issue no more commands beyond this point */
1465
1466        down(&lp->cmd_mutex);
1467
1468        /* Ok the card is now stopping */
1469
1470        regs=inb(ioaddr+HOST_CTRL);
1471        regs&=~HOST_CTRL_INTE;
1472        outb(regs, ioaddr+HOST_CTRL);
1473
1474        mc32_flush_rx_ring(dev);
1475        mc32_flush_tx_ring(dev);
1476
1477        mc32_update_stats(dev);
1478
1479        return 0;
1480}
1481
1482
1483/**
1484 *      mc32_get_stats          -       hand back stats to network layer
1485 *      @dev: The 3c527 card to handle
1486 *
1487 *      We've collected all the stats we can in software already. Now
1488 *      it's time to update those kept on-card and return the lot.
1489 *
1490 */
1491
1492static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1493{
1494        mc32_update_stats(dev);
1495        return &dev->stats;
1496}
1497
1498
1499/**
1500 *      do_mc32_set_multicast_list      -       attempt to update multicasts
1501 *      @dev: 3c527 device to load the list on
1502 *      @retry: indicates this is not the first call.
1503 *
1504 *
1505 *      Actually set or clear the multicast filter for this adaptor. The
1506 *      locking issues are handled by this routine. We have to track
1507 *      state as it may take multiple calls to get the command sequence
1508 *      completed. We just keep trying to schedule the loads until we
1509 *      manage to process them all.
1510 *
1511 *      num_addrs == -1 Promiscuous mode, receive all packets
1512 *
1513 *      num_addrs == 0  Normal mode, clear multicast list
1514 *
1515 *      num_addrs > 0   Multicast mode, receive normal and MC packets,
1516 *                      and do best-effort filtering.
1517 *
 1518 *      See mc32_update_stats() regarding setting the SAV BP bit.
1519 *
1520 */
1521
1522static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1523{
1524        struct mc32_local *lp = netdev_priv(dev);
1525        u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1526
1527        if ((dev->flags&IFF_PROMISC) ||
1528            (dev->flags&IFF_ALLMULTI) ||
1529            dev->mc_count > 10)
1530                /* Enable promiscuous mode */
1531                filt |= 1;
1532        else if(dev->mc_count)
1533        {
1534                unsigned char block[62];
1535                unsigned char *bp;
1536                struct dev_mc_list *dmc=dev->mc_list;
1537
1538                int i;
1539
1540                if(retry==0)
1541                        lp->mc_list_valid = 0;
1542                if(!lp->mc_list_valid)
1543                {
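                             /* Build the block for command 2: the address count in the
                                first two bytes, followed by the 6-byte addresses */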
1544                        block[1]=0;
1545                        block[0]=dev->mc_count;
1546                        bp=block+2;
1547
1548                        for(i=0;i<dev->mc_count;i++)
1549                        {
1550                                memcpy(bp, dmc->dmi_addr, 6);
1551                                bp+=6;
1552                                dmc=dmc->next;
1553                        }
1554                        if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
1555                        {
1556                                lp->mc_reload_wait = 1;
1557                                return;
1558                        }
1559                        lp->mc_list_valid=1;
1560                }
1561        }
1562
1563        if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1564        {
1565                lp->mc_reload_wait = 1;
1566        }
1567        else {
1568                lp->mc_reload_wait = 0;
1569        }
1570}
1571
1572
1573/**
1574 *      mc32_set_multicast_list -       queue multicast list update
1575 *      @dev: The 3c527 to use
1576 *
1577 *      Commence loading the multicast list. This is called when the kernel
1578 *      changes the lists. It will override any pending list we are trying to
1579 *      load.
1580 */
1581
1582static void mc32_set_multicast_list(struct net_device *dev)
1583{
1584        do_mc32_set_multicast_list(dev,0);
1585}
1586
1587
1588/**
1589 *      mc32_reset_multicast_list       -       reset multicast list
1590 *      @dev: The 3c527 to use
1591 *
1592 *      Attempt the next step in loading the multicast lists. If this attempt
1593 *      fails to complete then it will be scheduled and this function called
1594 *      again later from elsewhere.
1595 */
1596
1597static void mc32_reset_multicast_list(struct net_device *dev)
1598{
1599        do_mc32_set_multicast_list(dev,1);
1600}
1601
1602static void netdev_get_drvinfo(struct net_device *dev,
1603                               struct ethtool_drvinfo *info)
1604{
1605        strcpy(info->driver, DRV_NAME);
1606        strcpy(info->version, DRV_VERSION);
1607        sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1608}
1609
1610static u32 netdev_get_msglevel(struct net_device *dev)
1611{
1612        return mc32_debug;
1613}
1614
1615static void netdev_set_msglevel(struct net_device *dev, u32 level)
1616{
1617        mc32_debug = level;
1618}
1619
1620static const struct ethtool_ops netdev_ethtool_ops = {
1621        .get_drvinfo            = netdev_get_drvinfo,
1622        .get_msglevel           = netdev_get_msglevel,
1623        .set_msglevel           = netdev_set_msglevel,
1624};
1625
1626#ifdef MODULE
1627
1628static struct net_device *this_device;
1629
1630/**
1631 *      init_module             -       entry point
1632 *
1633 *      Probe and locate a 3c527 card. This really should probe and locate
1634 *      all the 3c527 cards in the machine not just one of them. Yes you can
1635 *      insmod multiple modules for now but it's a hack.
1636 */
1637
1638int __init init_module(void)
1639{
1640        this_device = mc32_probe(-1);
1641        if (IS_ERR(this_device))
1642                return PTR_ERR(this_device);
1643        return 0;
1644}
1645
1646/**
1647 *      cleanup_module  -       free resources for an unload
1648 *
1649 *      Unloading time. We release the MCA bus resources and the interrupt
1650 *      at which point everything is ready to unload. The card must be stopped
1651 *      at this point or we would not have been called. When we unload we
1652 *      leave the card stopped but not totally shut down. When the card is
1653 *      initialized it must be rebooted or the rings reloaded before any
1654 *      transmit operations are allowed to start scribbling into memory.
1655 */
1656
1657void __exit cleanup_module(void)
1658{
1659        unregister_netdev(this_device);
1660        cleanup_card(this_device);
1661        free_netdev(this_device);
1662}
1663
1664#endif /* MODULE */
1665