linux/drivers/net/ethernet/8390/lib8390.c
   1/* 8390.c: A general NS8390 ethernet driver core for linux. */
   2/*
   3        Written 1992-94 by Donald Becker.
   4
   5        Copyright 1993 United States Government as represented by the
   6        Director, National Security Agency.
   7
   8        This software may be used and distributed according to the terms
   9        of the GNU General Public License, incorporated herein by reference.
  10
  11        The author may be reached as becker@scyld.com, or C/O
  12        Scyld Computing Corporation
  13        410 Severn Ave., Suite 210
  14        Annapolis MD 21403
  15
  16
  17  This is the chip-specific code for many 8390-based ethernet adaptors.
  18  This is not a complete driver; it must be combined with board-specific
  19  code such as ne.c, wd.c, 3c503.c, etc.
  20
  21  Seeing how at least eight drivers use this code (not counting the
  22  PCMCIA ones), it is easy to break some card by what seems like
  23  a simple innocent change. Please contact me or Donald if you think
  24  you have found something that needs changing. -- PG
  25
  26
  27  Changelog:
  28
  29  Paul Gortmaker        : remove set_bit lock, other cleanups.
  30  Paul Gortmaker        : add ei_get_8390_hdr() so we can pass skb's to
  31                          ei_block_input() for eth_io_copy_and_sum().
  32  Paul Gortmaker        : exchange static int ei_pingpong for a #define,
  33                          also add better Tx error handling.
  34  Paul Gortmaker        : rewrite Rx overrun handling as per NS specs.
  35  Alexey Kuznetsov      : use the 8390's six bit hash multicast filter.
  36  Paul Gortmaker        : tweak ANK's above multicast changes a bit.
  37  Paul Gortmaker        : update packet statistics for v2.1.x
  38  Alan Cox              : support arbitrary stupid port mappings on the
  39                          68K Macintosh. Support >16bit I/O spaces
  40  Paul Gortmaker        : add kmod support for auto-loading of the 8390
  41                          module by all drivers that require it.
  42  Alan Cox              : Spinlocking work, added 'BUG_83C690'
  43  Paul Gortmaker        : Separate out Tx timeout code from Tx path.
  44  Paul Gortmaker        : Remove old unused single Tx buffer code.
  45  Hayato Fujiwara       : Add m32r support.
  46  Paul Gortmaker        : use skb_padto() instead of stack scratch area
  47
  48  Sources:
  49  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
  50
  51  */
  52
  53#include <linux/build_bug.h>
  54#include <linux/module.h>
  55#include <linux/kernel.h>
  56#include <linux/jiffies.h>
  57#include <linux/fs.h>
  58#include <linux/types.h>
  59#include <linux/string.h>
  60#include <linux/bitops.h>
  61#include <linux/uaccess.h>
  62#include <linux/io.h>
  63#include <asm/irq.h>
  64#include <linux/delay.h>
  65#include <linux/errno.h>
  66#include <linux/fcntl.h>
  67#include <linux/in.h>
  68#include <linux/interrupt.h>
  69#include <linux/init.h>
  70#include <linux/crc32.h>
  71
  72#include <linux/netdevice.h>
  73#include <linux/etherdevice.h>
  74
  75#define NS8390_CORE
  76#include "8390.h"
  77
  78#define BUG_83C690
  79
  80/* These are the operational function interfaces to board-specific
  81   routines.
  82        void reset_8390(struct net_device *dev)
  83                Resets the board associated with DEV, including a hardware reset of
  84                the 8390.  This is only called when there is a transmit timeout, and
  85                it is always followed by 8390_init().
  86        void block_output(struct net_device *dev, int count, const unsigned char *buf,
  87                                          int start_page)
  88                Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
  89                "page" value uses the 8390's 256-byte pages.
  90        void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
  91                Read the 4 byte, page aligned 8390 header. *If* there is a
  92                subsequent read, it will be of the rest of the packet.
  93        void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
  94                Read COUNT bytes from the packet buffer into the skb data area. Start
  95                reading from RING_OFFSET, the address as the 8390 sees it.  This will always
  96                follow the read of the 8390 header.
  97*/
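     /*
      * Illustrative sketch (not part of this driver): how a hypothetical
      * board driver might install the four hooks described above from its
      * probe routine.  The myboard_* names are invented for the example;
      * ei_status is assumed to be the usual accessor from 8390.h that
      * resolves to the ei_device behind netdev_priv(dev).  Kept under
      * #if 0 as documentation only.
      */
     #if 0
     static int myboard_install_hooks(struct net_device *dev)
     {
             /* Board detection, I/O region and IRQ setup elided. */
             ei_status.reset_8390   = myboard_reset_8390;
             ei_status.block_input  = myboard_block_input;
             ei_status.block_output = myboard_block_output;
             ei_status.get_8390_hdr = myboard_get_8390_hdr;
             return register_netdev(dev);
     }
     #endif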
  98#define ei_reset_8390 (ei_local->reset_8390)
  99#define ei_block_output (ei_local->block_output)
 100#define ei_block_input (ei_local->block_input)
 101#define ei_get_8390_hdr (ei_local->get_8390_hdr)
 102
 103/* Index to functions. */
 104static void ei_tx_intr(struct net_device *dev);
 105static void ei_tx_err(struct net_device *dev);
 106static void ei_receive(struct net_device *dev);
 107static void ei_rx_overrun(struct net_device *dev);
 108
 109/* Routines generic to NS8390-based boards. */
 110static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
 111                                                                int start_page);
 112static void do_set_multicast_list(struct net_device *dev);
 113static void __NS8390_init(struct net_device *dev, int startp);
 114
 115static unsigned version_printed;
 116static int msg_enable;
 117static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
 118                                     NETIF_MSG_TX_ERR);
 119module_param(msg_enable, int, 0444);
 120MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
 121
 122/*
 123 *      SMP and the 8390 setup.
 124 *
 125 *      The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
 126 *      a page register that controls bank and packet buffer access. We guard
 127 *      this with ei_local->page_lock. Nobody should assume or set the page other
 128 *      than zero when the lock is not held. Lock holders must restore page 0
 129 *      before unlocking. Even pure readers must take the lock to protect in
 130 *      before unlocking. Even pure readers must take the lock so they are
 131 *      guaranteed to be looking at page 0.
 132 *      To make life difficult, the chip can also be very slow. We therefore can't
 133 *      just use spinlocks. For the longer lockups we disable the irq the device
 134 *      sits on and hold the lock. We must hold the lock because there is a dual
 135 *      processor case other than interrupts (get stats/set multicast list in
 136 *      parallel with each other and transmit).
 137 *
 138 *      Note: in theory we can just disable the irq on the card _but_ there is
 139 *      a latency on SMP irq delivery. So we can easily go "disable irq",
 140 *      "sync irqs", enter lock, take the queued irq. So we waddle instead of flying.
 141 *
 142 *      Finally, by special arrangement for the purpose of being generally
 143 *      annoying, the transmit function is called bh-atomic. That places
 144 *      restrictions on the user-context callers, as disable_irq won't save
 145 *      them.
 146 *
 147 *      Additional explanation of problems with locking by Alan Cox:
 148 *
 149 *      "The author (me) didn't use spin_lock_irqsave because the slowness of the
 150 *      card means that approach caused horrible problems like losing serial data
 151 *      at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
 152 *      chips with FPGA front ends.
 153 *
 154 *      Ok the logic behind the 8390 is very simple:
 155 *
 156 *      Things to know
 157 *              - IRQ delivery is asynchronous to the PCI bus
 158 *              - Blocking the local CPU IRQ via spin locks was too slow
 159 *              - The chip has register windows needing locking work
 160 *
 161 *      So the path was once (I say once as people appear to have changed it
 162 *      in the mean time and it now looks rather bogus if the changes to use
 163 *      disable_irq_nosync_irqsave are disabling the local IRQ)
 164 *
 165 *
 166 *              Take the page lock
 167 *              Mask the IRQ on chip
 168 *              Disable the IRQ (but not mask locally- someone seems to have
 169 *                      broken this with the lock validator stuff)
 170 *                      [This must be _nosync as the page lock may otherwise
 171 *                              deadlock us]
 172 *              Drop the page lock and turn IRQs back on
 173 *
 174 *              At this point an existing IRQ may still be running but we can't
 175 *              get a new one
 176 *
 177 *              Take the lock (so we know the IRQ has terminated) but don't mask
 178 *      the IRQs on the processor
 179 *              Set irqlock [for debug]
 180 *
 181 *              Transmit (slow as ****)
 182 *
 183 *              re-enable the IRQ
 184 *
 185 *
 186 *      We have to use disable_irq because otherwise you will get delayed
 187 *      interrupts on the APIC bus deadlocking the transmit path.
 188 *
 189 *      Quite hairy but the chip simply wasn't designed for SMP and you can't
 190 *      even ACK an interrupt without risking corrupting other parallel
 191 *      activities on the chip." [lkml, 25 Jul 2007]
 192 */
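     /*
      * Condensed, illustrative restatement of the sequence described above,
      * mirroring what the transmit path below actually does.  The function
      * name is invented; this is documentation only, hence the #if 0.
      */
     #if 0
     static void example_slow_chip_access(struct net_device *dev)
     {
             struct ei_device *ei_local = netdev_priv(dev);
             unsigned long e8390_base = dev->base_addr;
             unsigned long flags;

             /* Fast phase: mask the chip's interrupts under the page lock. */
             spin_lock_irqsave(&ei_local->page_lock, flags);
             ei_outb_p(0x00, e8390_base + EN0_IMR);
             spin_unlock_irqrestore(&ei_local->page_lock, flags);

             /* Quiesce the IRQ line (nosync: a handler may still be running). */
             disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

             /* Retaking the page lock guarantees any in-flight handler is done. */
             spin_lock(&ei_local->page_lock);
             ei_local->irqlock = 1;

             /* ... the slow chip programming goes here ... */

             /* Unmask the chip, drop the lock, let hardware IRQs through again. */
             ei_local->irqlock = 0;
             ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
             spin_unlock(&ei_local->page_lock);
             enable_irq_lockdep_irqrestore(dev->irq, &flags);
     }
     #endif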
 193
 194
 195
 196/**
 197 * ei_open - Open/initialize the board.
 198 * @dev: network device to initialize
 199 *
 200 * This routine goes all-out, setting everything
 201 * up anew at each open, even though many of these registers should only
 202 * need to be set once at boot.
 203 */
 204static int __ei_open(struct net_device *dev)
 205{
 206        unsigned long flags;
 207        struct ei_device *ei_local = netdev_priv(dev);
 208
 209        if (dev->watchdog_timeo <= 0)
 210                dev->watchdog_timeo = TX_TIMEOUT;
 211
 212        /*
 213         *      Grab the page lock so we own the register set, then call
 214         *      the init function.
 215         */
 216
 217        spin_lock_irqsave(&ei_local->page_lock, flags);
 218        __NS8390_init(dev, 1);
  219        /* Set the flag before we drop the lock. That way the IRQ arrives
  220           after it's set and we get no silly warnings. */
 221        netif_start_queue(dev);
 222        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 223        ei_local->irqlock = 0;
 224        return 0;
 225}
 226
 227/**
 228 * ei_close - shut down network device
 229 * @dev: network device to close
 230 *
 231 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
 232 */
 233static int __ei_close(struct net_device *dev)
 234{
 235        struct ei_device *ei_local = netdev_priv(dev);
 236        unsigned long flags;
 237
 238        /*
 239         *      Hold the page lock during close
 240         */
 241
 242        spin_lock_irqsave(&ei_local->page_lock, flags);
 243        __NS8390_init(dev, 0);
 244        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 245        netif_stop_queue(dev);
 246        return 0;
 247}
 248
 249/**
 250 * ei_tx_timeout - handle transmit time out condition
  251 * @dev: network device which has apparently fallen asleep
      * @txqueue: index of the timed-out transmit queue (unused here)
  252 *
 253 * Called by kernel when device never acknowledges a transmit has
 254 * completed (or failed) - i.e. never posted a Tx related interrupt.
 255 */
 256
 257static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
 258{
 259        unsigned long e8390_base = dev->base_addr;
 260        struct ei_device *ei_local = netdev_priv(dev);
 261        int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
 262        unsigned long flags;
 263
 264        dev->stats.tx_errors++;
 265
 266        spin_lock_irqsave(&ei_local->page_lock, flags);
 267        txsr = ei_inb(e8390_base+EN0_TSR);
 268        isr = ei_inb(e8390_base+EN0_ISR);
 269        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 270
 271        netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
 272                   (txsr & ENTSR_ABT) ? "excess collisions." :
 273                   (isr) ? "lost interrupt?" : "cable problem?",
 274                   txsr, isr, tickssofar);
 275
 276        if (!isr && !dev->stats.tx_packets) {
 277                /* The 8390 probably hasn't gotten on the cable yet. */
 278                ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
 279        }
 280
 281        /* Ugly but a reset can be slow, yet must be protected */
 282
 283        disable_irq_nosync_lockdep(dev->irq);
 284        spin_lock(&ei_local->page_lock);
 285
 286        /* Try to restart the card.  Perhaps the user has fixed something. */
 287        ei_reset_8390(dev);
 288        __NS8390_init(dev, 1);
 289
 290        spin_unlock(&ei_local->page_lock);
 291        enable_irq_lockdep(dev->irq);
 292        netif_wake_queue(dev);
 293}
 294
 295/**
 296 * ei_start_xmit - begin packet transmission
 297 * @skb: packet to be sent
 298 * @dev: network device to which packet is sent
 299 *
 300 * Sends a packet to an 8390 network device.
 301 */
 302
 303static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
 304                                   struct net_device *dev)
 305{
 306        unsigned long e8390_base = dev->base_addr;
 307        struct ei_device *ei_local = netdev_priv(dev);
 308        int send_length = skb->len, output_page;
 309        unsigned long flags;
 310        char buf[ETH_ZLEN];
 311        char *data = skb->data;
 312
 313        if (skb->len < ETH_ZLEN) {
 314                memset(buf, 0, ETH_ZLEN);       /* more efficient than doing just the needed bits */
 315                memcpy(buf, data, skb->len);
 316                send_length = ETH_ZLEN;
 317                data = buf;
 318        }
 319
 320        /* Mask interrupts from the ethercard.
  321           SMP: We have to grab the lock here, otherwise the IRQ handler
  322           on another CPU can flip the register window and race the IRQ mask
  323           set, trashing the mcast filter instead of disabling irqs. */
 324
 325        spin_lock_irqsave(&ei_local->page_lock, flags);
 326        ei_outb_p(0x00, e8390_base + EN0_IMR);
 327        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 328
 329
 330        /*
 331         *      Slow phase with lock held.
 332         */
 333
 334        disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
 335
 336        spin_lock(&ei_local->page_lock);
 337
 338        ei_local->irqlock = 1;
 339
 340        /*
 341         * We have two Tx slots available for use. Find the first free
 342         * slot, and then perform some sanity checks. With two Tx bufs,
 343         * you get very close to transmitting back-to-back packets. With
 344         * only one Tx buf, the transmitter sits idle while you reload the
 345         * card, leaving a substantial gap between each transmitted packet.
 346         */
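             /*
              * Illustrative note on the buffer layout assumed by the slot
              * selection below (numbers are the 8390's 256-byte pages):
              * slot 1 starts at tx_start_page, slot 2 at
              * tx_start_page + TX_PAGES/2, and the receive ring occupies
              * rx_start_page up to stop_page.
              */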
 347
 348        if (ei_local->tx1 == 0) {
 349                output_page = ei_local->tx_start_page;
 350                ei_local->tx1 = send_length;
 351                if ((netif_msg_tx_queued(ei_local)) &&
 352                    ei_local->tx2 > 0)
 353                        netdev_dbg(dev,
 354                                   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
 355                                   ei_local->tx2, ei_local->lasttx, ei_local->txing);
 356        } else if (ei_local->tx2 == 0) {
 357                output_page = ei_local->tx_start_page + TX_PAGES/2;
 358                ei_local->tx2 = send_length;
 359                if ((netif_msg_tx_queued(ei_local)) &&
 360                    ei_local->tx1 > 0)
 361                        netdev_dbg(dev,
 362                                   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
 363                                   ei_local->tx1, ei_local->lasttx, ei_local->txing);
 364        } else {                        /* We should never get here. */
 365                netif_dbg(ei_local, tx_err, dev,
 366                          "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
 367                          ei_local->tx1, ei_local->tx2, ei_local->lasttx);
 368                ei_local->irqlock = 0;
 369                netif_stop_queue(dev);
 370                ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 371                spin_unlock(&ei_local->page_lock);
 372                enable_irq_lockdep_irqrestore(dev->irq, &flags);
 373                dev->stats.tx_errors++;
 374                return NETDEV_TX_BUSY;
 375        }
 376
 377        /*
 378         * Okay, now upload the packet and trigger a send if the transmitter
 379         * isn't already sending. If it is busy, the interrupt handler will
 380         * trigger the send later, upon receiving a Tx done interrupt.
 381         */
 382
 383        ei_block_output(dev, send_length, data, output_page);
 384
 385        if (!ei_local->txing) {
 386                ei_local->txing = 1;
 387                NS8390_trigger_send(dev, send_length, output_page);
 388                if (output_page == ei_local->tx_start_page) {
 389                        ei_local->tx1 = -1;
 390                        ei_local->lasttx = -1;
 391                } else {
 392                        ei_local->tx2 = -1;
 393                        ei_local->lasttx = -2;
 394                }
 395        } else
 396                ei_local->txqueue++;
 397
 398        if (ei_local->tx1 && ei_local->tx2)
 399                netif_stop_queue(dev);
 400        else
 401                netif_start_queue(dev);
 402
 403        /* Turn 8390 interrupts back on. */
 404        ei_local->irqlock = 0;
 405        ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 406
 407        spin_unlock(&ei_local->page_lock);
 408        enable_irq_lockdep_irqrestore(dev->irq, &flags);
 409        skb_tx_timestamp(skb);
 410        dev_consume_skb_any(skb);
 411        dev->stats.tx_bytes += send_length;
 412
 413        return NETDEV_TX_OK;
 414}
 415
 416/**
 417 * ei_interrupt - handle the interrupts from an 8390
 418 * @irq: interrupt number
 419 * @dev_id: a pointer to the net_device
 420 *
 421 * Handle the ether interface interrupts. We pull packets from
  422 * the 8390 via the card-specific functions and fire them at the networking
  423 * stack. We also handle transmit completions, wake the transmit path if
  424 * necessary, update the counters, and do other housekeeping as
  425 * needed.
 426 */
 427
 428static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 429{
 430        struct net_device *dev = dev_id;
 431        unsigned long e8390_base = dev->base_addr;
 432        int interrupts, nr_serviced = 0;
 433        struct ei_device *ei_local = netdev_priv(dev);
 434
 435        /*
 436         *      Protect the irq test too.
 437         */
 438
 439        spin_lock(&ei_local->page_lock);
 440
 441        if (ei_local->irqlock) {
 442                /*
 443                 * This might just be an interrupt for a PCI device sharing
 444                 * this line
 445                 */
 446                netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
 447                           ei_inb_p(e8390_base + EN0_ISR),
 448                           ei_inb_p(e8390_base + EN0_IMR));
 449                spin_unlock(&ei_local->page_lock);
 450                return IRQ_NONE;
 451        }
 452
 453        /* Change to page 0 and read the intr status reg. */
 454        ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
 455        netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
 456                  ei_inb_p(e8390_base + EN0_ISR));
 457
 458        /* !!Assumption!! -- we stay in page 0.  Don't break this. */
 459        while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
 460               ++nr_serviced < MAX_SERVICE) {
 461                if (!netif_running(dev)) {
 462                        netdev_warn(dev, "interrupt from stopped card\n");
 463                        /* rmk - acknowledge the interrupts */
 464                        ei_outb_p(interrupts, e8390_base + EN0_ISR);
 465                        interrupts = 0;
 466                        break;
 467                }
 468                if (interrupts & ENISR_OVER)
 469                        ei_rx_overrun(dev);
 470                else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
 471                        /* Got a good (?) packet. */
 472                        ei_receive(dev);
 473                }
 474                /* Push the next to-transmit packet through. */
 475                if (interrupts & ENISR_TX)
 476                        ei_tx_intr(dev);
 477                else if (interrupts & ENISR_TX_ERR)
 478                        ei_tx_err(dev);
 479
 480                if (interrupts & ENISR_COUNTERS) {
 481                        dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
 482                        dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
 483                        dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
 484                        ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
 485                }
 486
 487                /* Ignore any RDC interrupts that make it back to here. */
 488                if (interrupts & ENISR_RDC)
 489                        ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
 490
 491                ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
 492        }
 493
 494        if (interrupts && (netif_msg_intr(ei_local))) {
 495                ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
 496                if (nr_serviced >= MAX_SERVICE) {
 497                        /* 0xFF is valid for a card removal */
 498                        if (interrupts != 0xFF)
 499                                netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
 500                                            interrupts);
 501                        ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
 502                } else {
 503                        netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
 504                        ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
 505                }
 506        }
 507        spin_unlock(&ei_local->page_lock);
 508        return IRQ_RETVAL(nr_serviced > 0);
 509}
 510
 511#ifdef CONFIG_NET_POLL_CONTROLLER
 512static void __ei_poll(struct net_device *dev)
 513{
 514        disable_irq(dev->irq);
 515        __ei_interrupt(dev->irq, dev);
 516        enable_irq(dev->irq);
 517}
 518#endif
 519
 520/**
 521 * ei_tx_err - handle transmitter error
 522 * @dev: network device which threw the exception
 523 *
 524 * A transmitter error has happened. Most likely excess collisions (which
 525 * is a fairly normal condition). If the error is one where the Tx will
 526 * have been aborted, we try and send another one right away, instead of
 527 * letting the failed packet sit and collect dust in the Tx buffer. This
 528 * is a much better solution as it avoids kernel based Tx timeouts, and
 529 * an unnecessary card reset.
 530 *
 531 * Called with lock held.
 532 */
 533
 534static void ei_tx_err(struct net_device *dev)
 535{
 536        unsigned long e8390_base = dev->base_addr;
 537        /* ei_local is used on some platforms via the EI_SHIFT macro */
 538        struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 539        unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
 540        unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
 541
 542#ifdef VERBOSE_ERROR_DUMP
 543        netdev_dbg(dev, "transmitter error (%#2x):", txsr);
 544        if (txsr & ENTSR_ABT)
 545                pr_cont(" excess-collisions ");
 546        if (txsr & ENTSR_ND)
 547                pr_cont(" non-deferral ");
 548        if (txsr & ENTSR_CRS)
 549                pr_cont(" lost-carrier ");
 550        if (txsr & ENTSR_FU)
 551                pr_cont(" FIFO-underrun ");
 552        if (txsr & ENTSR_CDH)
 553                pr_cont(" lost-heartbeat ");
 554        pr_cont("\n");
 555#endif
 556
 557        ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
 558
 559        if (tx_was_aborted)
 560                ei_tx_intr(dev);
 561        else {
 562                dev->stats.tx_errors++;
 563                if (txsr & ENTSR_CRS)
 564                        dev->stats.tx_carrier_errors++;
 565                if (txsr & ENTSR_CDH)
 566                        dev->stats.tx_heartbeat_errors++;
 567                if (txsr & ENTSR_OWC)
 568                        dev->stats.tx_window_errors++;
 569        }
 570}
 571
 572/**
 573 * ei_tx_intr - transmit interrupt handler
 574 * @dev: network device for which tx intr is handled
 575 *
 576 * We have finished a transmit: check for errors and then trigger the next
 577 * packet to be sent. Called with lock held.
 578 */
 579
 580static void ei_tx_intr(struct net_device *dev)
 581{
 582        unsigned long e8390_base = dev->base_addr;
 583        struct ei_device *ei_local = netdev_priv(dev);
 584        int status = ei_inb(e8390_base + EN0_TSR);
 585
 586        ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
 587
 588        /*
 589         * There are two Tx buffers, see which one finished, and trigger
 590         * the send of another one if it exists.
 591         */
 592        ei_local->txqueue--;
 593
 594        if (ei_local->tx1 < 0) {
 595                if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
 596                        pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
 597                               ei_local->name, ei_local->lasttx, ei_local->tx1);
 598                ei_local->tx1 = 0;
 599                if (ei_local->tx2 > 0) {
 600                        ei_local->txing = 1;
 601                        NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
 602                        netif_trans_update(dev);
 603                        ei_local->tx2 = -1;
 604                        ei_local->lasttx = 2;
 605                } else {
 606                        ei_local->lasttx = 20;
 607                        ei_local->txing = 0;
 608                }
 609        } else if (ei_local->tx2 < 0) {
 610                if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
 611                        pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
 612                               ei_local->name, ei_local->lasttx, ei_local->tx2);
 613                ei_local->tx2 = 0;
 614                if (ei_local->tx1 > 0) {
 615                        ei_local->txing = 1;
 616                        NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
 617                        netif_trans_update(dev);
 618                        ei_local->tx1 = -1;
 619                        ei_local->lasttx = 1;
 620                } else {
 621                        ei_local->lasttx = 10;
 622                        ei_local->txing = 0;
 623                }
 624        } /* else
 625                netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
 626                            ei_local->lasttx);
 627*/
 628
 629        /* Minimize Tx latency: update the statistics after we restart TXing. */
 630        if (status & ENTSR_COL)
 631                dev->stats.collisions++;
 632        if (status & ENTSR_PTX)
 633                dev->stats.tx_packets++;
 634        else {
 635                dev->stats.tx_errors++;
 636                if (status & ENTSR_ABT) {
 637                        dev->stats.tx_aborted_errors++;
 638                        dev->stats.collisions += 16;
 639                }
 640                if (status & ENTSR_CRS)
 641                        dev->stats.tx_carrier_errors++;
 642                if (status & ENTSR_FU)
 643                        dev->stats.tx_fifo_errors++;
 644                if (status & ENTSR_CDH)
 645                        dev->stats.tx_heartbeat_errors++;
 646                if (status & ENTSR_OWC)
 647                        dev->stats.tx_window_errors++;
 648        }
 649        netif_wake_queue(dev);
 650}
 651
 652/**
 653 * ei_receive - receive some packets
 654 * @dev: network device with which receive will be run
 655 *
 656 * We have a good packet(s), get it/them out of the buffers.
 657 * Called with lock held.
 658 */
 659
 660static void ei_receive(struct net_device *dev)
 661{
 662        unsigned long e8390_base = dev->base_addr;
 663        struct ei_device *ei_local = netdev_priv(dev);
 664        unsigned char rxing_page, this_frame, next_frame;
 665        unsigned short current_offset;
 666        int rx_pkt_count = 0;
 667        struct e8390_pkt_hdr rx_frame;
 668        int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
 669
 670        while (++rx_pkt_count < 10) {
 671                int pkt_len, pkt_stat;
 672
 673                /* Get the rx page (incoming packet pointer). */
 674                ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
 675                rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
 676                ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
 677
 678                /* Remove one frame from the ring.  Boundary is always a page behind. */
 679                this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
 680                if (this_frame >= ei_local->stop_page)
 681                        this_frame = ei_local->rx_start_page;
 682
 683                /* Someday we'll omit the previous, iff we never get this message.
  684                   (There is at least one clone that is claimed to have a problem.)
 685
 686                   Keep quiet if it looks like a card removal. One problem here
 687                   is that some clones crash in roughly the same way.
 688                 */
 689                if ((netif_msg_rx_status(ei_local)) &&
 690                    this_frame != ei_local->current_page &&
 691                    (this_frame != 0x0 || rxing_page != 0xFF))
 692                        netdev_err(dev,
 693                                   "mismatched read page pointers %2x vs %2x\n",
 694                                   this_frame, ei_local->current_page);
 695
 696                if (this_frame == rxing_page)   /* Read all the frames? */
 697                        break;                          /* Done for now */
 698
 699                current_offset = this_frame << 8;
 700                ei_get_8390_hdr(dev, &rx_frame, this_frame);
 701
 702                pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
 703                pkt_stat = rx_frame.status;
 704
 705                next_frame = this_frame + 1 + ((pkt_len+4)>>8);
 706
  707                /* Check for the bogosity warned about in the 3c503 book: the status
  708                   byte is never written.  This happened a lot during testing! This
  709                   code should be cleaned up someday. */
 710                if (rx_frame.next != next_frame &&
 711                    rx_frame.next != next_frame + 1 &&
 712                    rx_frame.next != next_frame - num_rx_pages &&
 713                    rx_frame.next != next_frame + 1 - num_rx_pages) {
 714                        ei_local->current_page = rxing_page;
 715                        ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
 716                        dev->stats.rx_errors++;
 717                        continue;
 718                }
 719
 720                if (pkt_len < 60  ||  pkt_len > 1518) {
 721                        netif_dbg(ei_local, rx_status, dev,
 722                                  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
 723                                  rx_frame.count, rx_frame.status,
 724                                  rx_frame.next);
 725                        dev->stats.rx_errors++;
 726                        dev->stats.rx_length_errors++;
 727                } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
 728                        struct sk_buff *skb;
 729
 730                        skb = netdev_alloc_skb(dev, pkt_len + 2);
 731                        if (skb == NULL) {
 732                                netif_err(ei_local, rx_err, dev,
 733                                          "Couldn't allocate a sk_buff of size %d\n",
 734                                          pkt_len);
 735                                dev->stats.rx_dropped++;
 736                                break;
 737                        } else {
 738                                skb_reserve(skb, 2);    /* IP headers on 16 byte boundaries */
 739                                skb_put(skb, pkt_len);  /* Make room */
 740                                ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
 741                                skb->protocol = eth_type_trans(skb, dev);
 742                                if (!skb_defer_rx_timestamp(skb))
 743                                        netif_rx(skb);
 744                                dev->stats.rx_packets++;
 745                                dev->stats.rx_bytes += pkt_len;
 746                                if (pkt_stat & ENRSR_PHY)
 747                                        dev->stats.multicast++;
 748                        }
 749                } else {
 750                        netif_err(ei_local, rx_err, dev,
 751                                  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
 752                                  rx_frame.status, rx_frame.next,
 753                                  rx_frame.count);
 754                        dev->stats.rx_errors++;
 755                        /* NB: The NIC counts CRC, frame and missed errors. */
 756                        if (pkt_stat & ENRSR_FO)
 757                                dev->stats.rx_fifo_errors++;
 758                }
 759                next_frame = rx_frame.next;
 760
 761                /* This _should_ never happen: it's here for avoiding bad clones. */
 762                if (next_frame >= ei_local->stop_page) {
 763                        netdev_notice(dev, "next frame inconsistency, %#2x\n",
 764                                      next_frame);
 765                        next_frame = ei_local->rx_start_page;
 766                }
 767                ei_local->current_page = next_frame;
 768                ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
 769        }
 770
 771        /* We used to also ack ENISR_OVER here, but that would sometimes mask
  772           a real overrun, leaving the 8390 in a stopped state with the receiver off. */
 773        ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
 774}
 775
 776/**
 777 * ei_rx_overrun - handle receiver overrun
 778 * @dev: network device which threw exception
 779 *
 780 * We have a receiver overrun: we have to kick the 8390 to get it started
 781 * again. Problem is that you have to kick it exactly as NS prescribes in
 782 * the updated datasheets, or "the NIC may act in an unpredictable manner."
 783 * This includes causing "the NIC to defer indefinitely when it is stopped
 784 * on a busy network."  Ugh.
 785 * Called with lock held. Don't call this with the interrupts off or your
 786 * computer will hate you - it takes 10ms or so.
 787 */
 788
 789static void ei_rx_overrun(struct net_device *dev)
 790{
 791        unsigned long e8390_base = dev->base_addr;
 792        unsigned char was_txing, must_resend = 0;
 793        /* ei_local is used on some platforms via the EI_SHIFT macro */
 794        struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 795
 796        /*
 797         * Record whether a Tx was in progress and then issue the
 798         * stop command.
 799         */
 800        was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
 801        ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
 802
 803        netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
 804        dev->stats.rx_over_errors++;
 805
 806        /*
 807         * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
 808         * Early datasheets said to poll the reset bit, but now they say that
 809         * it "is not a reliable indicator and subsequently should be ignored."
 810         * We wait at least 10ms.
 811         */
 812
 813        mdelay(10);
 814
 815        /*
 816         * Reset RBCR[01] back to zero as per magic incantation.
 817         */
 818        ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
 819        ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
 820
 821        /*
 822         * See if any Tx was interrupted or not. According to NS, this
 823         * step is vital, and skipping it will cause no end of havoc.
 824         */
 825
 826        if (was_txing) {
 827                unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
 828                if (!tx_completed)
 829                        must_resend = 1;
 830        }
 831
 832        /*
 833         * Have to enter loopback mode and then restart the NIC before
 834         * you are allowed to slurp packets up off the ring.
 835         */
 836        ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
 837        ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
 838
 839        /*
 840         * Clear the Rx ring of all the debris, and ack the interrupt.
 841         */
 842        ei_receive(dev);
 843        ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
 844
 845        /*
 846         * Leave loopback mode, and resend any packet that got stopped.
 847         */
 848        ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
 849        if (must_resend)
 850                ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
 851}
 852
 853/*
 854 *      Collect the stats. This is called unlocked and from several contexts.
 855 */
 856
 857static struct net_device_stats *__ei_get_stats(struct net_device *dev)
 858{
 859        unsigned long ioaddr = dev->base_addr;
 860        struct ei_device *ei_local = netdev_priv(dev);
 861        unsigned long flags;
 862
 863        /* If the card is stopped, just return the present stats. */
 864        if (!netif_running(dev))
 865                return &dev->stats;
 866
 867        spin_lock_irqsave(&ei_local->page_lock, flags);
 868        /* Read the counter registers, assuming we are in page 0. */
 869        dev->stats.rx_frame_errors  += ei_inb_p(ioaddr + EN0_COUNTER0);
 870        dev->stats.rx_crc_errors    += ei_inb_p(ioaddr + EN0_COUNTER1);
 871        dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
 872        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 873
 874        return &dev->stats;
 875}
 876
 877/*
 878 * Form the 64 bit 8390 multicast table from the linked list of addresses
 879 * associated with this dev structure.
 880 */
 881
 882static inline void make_mc_bits(u8 *bits, struct net_device *dev)
 883{
 884        struct netdev_hw_addr *ha;
 885
 886        netdev_for_each_mc_addr(ha, dev) {
 887                u32 crc = ether_crc(ETH_ALEN, ha->addr);
 888                /*
  889                 * The 8390 uses the 6 most significant bits of the
  890                 * CRC to index the multicast table: the top 3 bits
                      * select one of the 8 filter bytes, the next 3 the
                      * bit within that byte.
 891                 */
 892                bits[crc>>29] |= (1<<((crc>>26)&7));
 893        }
 894}
 895
 896/**
 897 * do_set_multicast_list - set/clear multicast filter
 898 * @dev: net device for which multicast filter is adjusted
 899 *
 900 *      Set or clear the multicast filter for this adaptor. May be called
 901 *      from a BH in 2.1.x. Must be called with lock held.
 902 */
 903
 904static void do_set_multicast_list(struct net_device *dev)
 905{
 906        unsigned long e8390_base = dev->base_addr;
 907        int i;
 908        struct ei_device *ei_local = netdev_priv(dev);
 909
 910        if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
 911                memset(ei_local->mcfilter, 0, 8);
 912                if (!netdev_mc_empty(dev))
 913                        make_mc_bits(ei_local->mcfilter, dev);
 914        } else
 915                memset(ei_local->mcfilter, 0xFF, 8);    /* mcast set to accept-all */
 916
 917        /*
 918         * DP8390 manuals don't specify any magic sequence for altering
 919         * the multicast regs on an already running card. To be safe, we
 920         * ensure multicast mode is off prior to loading up the new hash
 921         * table. If this proves to be not enough, we can always resort
 922         * to stopping the NIC, loading the table and then restarting.
 923         *
 924         * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
 925         * Elite16) appear to be write-only. The NS 8390 data sheet lists
 926         * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
 927         * Ultra32 EISA) appears to have this bug fixed.
 928         */
 929
 930        if (netif_running(dev))
 931                ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
 932        ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
 933        for (i = 0; i < 8; i++) {
 934                ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
 935#ifndef BUG_83C690
 936                if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
  937                        netdev_err(dev, "Multicast filter read/write mismatch %d\n",
 938                                   i);
 939#endif
 940        }
 941        ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
 942
 943        if (dev->flags&IFF_PROMISC)
  944                ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); /* promiscuous + multicast */
  945        else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
  946                ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); /* accept multicast */
 947        else
 948                ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
 949}
 950
 951/*
  952 *      Called without the lock held. This is invoked from user context and may
  953 *      run in parallel with just about everything else. It's also fairly quick
  954 *      and not called too often. Must protect against both bh and irq users.
 955 */
 956
 957static void __ei_set_multicast_list(struct net_device *dev)
 958{
 959        unsigned long flags;
 960        struct ei_device *ei_local = netdev_priv(dev);
 961
 962        spin_lock_irqsave(&ei_local->page_lock, flags);
 963        do_set_multicast_list(dev);
 964        spin_unlock_irqrestore(&ei_local->page_lock, flags);
 965}
 966
 967/**
 968 * ethdev_setup - init rest of 8390 device struct
 969 * @dev: network device structure to init
 970 *
 971 * Initialize the rest of the 8390 device structure.  Do NOT __init
 972 * this, as it is used by 8390 based modular drivers too.
 973 */
 974
 975static void ethdev_setup(struct net_device *dev)
 976{
 977        struct ei_device *ei_local = netdev_priv(dev);
 978
 979        ether_setup(dev);
 980
 981        spin_lock_init(&ei_local->page_lock);
 982
 983        ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);
 984
 985        if (netif_msg_drv(ei_local) && (version_printed++ == 0))
 986                pr_info("%s", version);
 987}
 988
 989/**
 990 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
 991 * @size: extra bytes to allocate
 992 *
 993 * Allocate 8390-specific net_device.
 994 */
 995static struct net_device *____alloc_ei_netdev(int size)
 996{
 997        return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
 998                            NET_NAME_UNKNOWN, ethdev_setup);
 999}
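     /*
      * Illustrative only: a hypothetical board driver allocating its
      * net_device through the helper above, reserving room for its own
      * private state behind the ei_device.  "struct myboard_priv" and the
      * function name are invented for this sketch; board drivers normally
      * reach the allocator through the wrappers built around this file
      * rather than calling it directly.  Kept under #if 0 as documentation
      * only.
      */
     #if 0
     static struct net_device *myboard_alloc(void)
     {
             struct net_device *dev;

             dev = ____alloc_ei_netdev(sizeof(struct myboard_priv));
             if (!dev)
                     return NULL;
             /* Fill in dev->base_addr, dev->irq and the ei_status hooks here. */
             return dev;
     }
     #endif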
1000
1001
1002
1003
1004/* This page of functions should be 8390 generic */
1005/* Follow National Semi's recommendations for initializing the "NIC". */
1006
1007/**
1008 * NS8390_init - initialize 8390 hardware
1009 * @dev: network device to initialize
1010 * @startp: boolean.  non-zero value to initiate chip processing
1011 *
1012 *      Must be called with lock held.
1013 */
1014
1015static void __NS8390_init(struct net_device *dev, int startp)
1016{
1017        unsigned long e8390_base = dev->base_addr;
1018        struct ei_device *ei_local = netdev_priv(dev);
1019        int i;
1020        int endcfg = ei_local->word16
1021            ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1022            : 0x48;
1023
1024        BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
 1025        /* Follow National Semi's recommendations for initializing the DP83902. */
1026        ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1027        ei_outb_p(endcfg, e8390_base + EN0_DCFG);       /* 0x48 or 0x49 */
1028        /* Clear the remote byte count registers. */
1029        ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
1030        ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
 1031        /* Set to monitor and loopback mode -- this is vital! */
1032        ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1033        ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1034        /* Set the transmit page and receive ring. */
1035        ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1036        ei_local->tx1 = ei_local->tx2 = 0;
1037        ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
 1038        ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);    /* 3c503 says 0x3f, NS says 0x26 */
1039        ei_local->current_page = ei_local->rx_start_page;               /* assert boundary+1 */
1040        ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1041        /* Clear the pending interrupts and mask. */
1042        ei_outb_p(0xFF, e8390_base + EN0_ISR);
1043        ei_outb_p(0x00,  e8390_base + EN0_IMR);
1044
1045        /* Copy the station address into the DS8390 registers. */
1046
1047        ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1048        for (i = 0; i < 6; i++) {
1049                ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1050                if ((netif_msg_probe(ei_local)) &&
1051                    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1052                        netdev_err(dev,
 1053                                   "Hw. address read/write mismatch %d\n", i);
1054        }
1055
1056        ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1057        ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1058
1059        ei_local->tx1 = ei_local->tx2 = 0;
1060        ei_local->txing = 0;
1061
1062        if (startp) {
1063                ei_outb_p(0xff,  e8390_base + EN0_ISR);
1064                ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
1065                ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1066                ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1067                /* 3c503 TechMan says rxconfig only after the NIC is started. */
1068                ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
1069                do_set_multicast_list(dev);     /* (re)load the mcast table */
1070        }
1071}
1072
1073/* Trigger a transmit start, assuming the length is valid.
1074   Always called with the page lock held */
1075
1076static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1077                                                                int start_page)
1078{
1079        unsigned long e8390_base = dev->base_addr;
 1080        struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
1081
1082        ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1083
1084        if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1085                netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1086                return;
1087        }
1088        ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1089        ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1090        ei_outb_p(start_page, e8390_base + EN0_TPSR);
1091        ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1092}
1093