linux/drivers/net/lib82596.c
/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>

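/*
 * Note: this file is not built standalone; it is #included by the
 * bus-specific wrappers (lasi_82596.c and sni_82596.c), which supply
 * the platform glue used throughout: SYSBUS, the SWAP16()/SWAP32()
 * byte-order helpers, the cache-maintenance macros DMA_INV(),
 * DMA_WBACK() and DMA_WBACK_INV(), the shared-memory allocators
 * DMA_ALLOC()/DMA_FREE(), and the accessors ca() and mpu_port().
 * Purely as an illustration (the real definitions live in the
 * including file and differ per platform), a wrapper driving the chip
 * in big-endian mode might define:
 *
 *	#define SWAP32(x)	(((u32)(x) << 16) | ((u32)(x) >> 16))
 *	#define SWAP16(x)	(x)
 *
 * reflecting that the i596's big-endian mode swaps the 16-bit halves
 * of 32-bit values, as described in the 82596.c notes above.
 */
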
/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x, y)	do { if (i596_debug & (x)) { y; } } while (0)

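/*
 * Example use (from init_i596_mem() below); the second argument is only
 * evaluated when the matching bit is set in i596_debug:
 *
 *	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 */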

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCP address */
#define PORT_ALTDUMP		0x03	/* alternate DUMP address */
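
/*
 * For example, init_i596_mem() below points the chip at our System
 * Configuration Pointer with:
 *
 *	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
 *	ca(dev);
 *
 * where mpu_port() and ca() come from the including bus-specific driver
 * and perform the actual (word-swapped, where required) register writes
 * and the channel-attention strobe.
 */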

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	5


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */
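
/*
 * Concretely, i596_add_cmd() below chains a new command onto the queue
 * tail with:
 *
 *	lp->cmd_tail->v_next = cmd;
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 *
 * i.e. the bus-side link deliberately skips v_next and points straight
 * at the status word, the first field the chip expects to see.
 */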

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	u32            tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;	/* Address from i596 viewpoint */
	u32            rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16
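
/*
 * Rough budget (32-bit build, each descriptor padded to a 32-byte cache
 * line): 16 RFDs + 16 RBDs + 32 tx_cmds + 32 TBDs is 96 * 32 = 3072
 * bytes, which together with the scp/iscp/scb and the static commands
 * above stays comfortably below a 4096-byte page.  i82596_probe()
 * enforces the size and alignment assumptions with BUILD_BUG_ON().
 */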

struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32           cmd;
	u32           rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	u32 scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};

struct i596_dma {
	struct i596_scp scp			__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;	/* serialize access to chip */
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f		/* multi IA */
};

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout(struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		       dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
	       SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
		       rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	DMA_INV(dev, dma, sizeof(struct i596_dma));
}


#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
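
/*
 * virt_to_dma() translates a CPU pointer lying inside the coherently
 * allocated i596_dma block into the matching bus address, by reusing
 * the base handle returned by DMA_ALLOC().  For example, init_i596_mem()
 * hands the chip the bus address of the ISCP with:
 *
 *	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
 */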

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);

		if (skb == NULL)
			return -1;
		skb_reserve(skb, 2);
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave(&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}


static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
				      rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & SIZE_MASK;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else
				skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_ERR
				       "%s: i596_rx Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
		i596_reset(dev, lp);
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset(dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca(dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	dev->trans_start = jiffies;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

static int __devinit i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma;

	/* This lot ensures things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
		sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
	if (!dma) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(dma, 0, sizeof(struct i596_dma));
	lp->dma = dma;

	dma->scb.command = 0;
	dma->scb.cmd = I596_NULL;
	dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

	i = register_netdev(dev);
	if (i) {
		DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
			 (void *)dma, lp->dma_addr);
		return i;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
			      dev->name, dev->base_addr));
	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, dma, (int)sizeof(struct i596_dma),
			     &dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock(&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			     dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock(&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if (status & 0x8000)
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if (status & 0x2000)
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if (status & 0x4000)
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

	/*
	 * DANGER: I suspect that some kind of interrupt acknowledgement
	 * aside from acking the 82596 might be needed here...  but it's
	 * running acceptably without.
	 */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, dev->mc_count,
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
		       dev->name, cnt);
	}

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		/* Advertise only as many addresses as we actually copy
		 * into mc_addrs[]; cnt was clamped to MAX_MC_CNT above.
		 */
		cmd->mc_cnt = SWAP16(cnt * 6);
		cp = cmd->mc_addrs;
		for (dmi = dev->mc_list;
		     cnt && dmi != NULL;
		     dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
		}
		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}
1416