linux/drivers/net/tulip/interrupt.c
/*
        drivers/net/tulip/interrupt.c

        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
        for more information on this driver.
        Please submit bugs to http://bugzilla.kernel.org/ .

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; other techniques are used
            for TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
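
/* Worked example of the encoding above: 0x80150000 sets bit 31 (cycle
 * size), and the 0x15 in bits 23:16 breaks down as bits 23:20 = 0001
 * (RX timer = 1 cycle), bits 19:17 = 010 (interrupt after 2 RX packets)
 * and bit 16 = 1 (Continuous Mode), matching the "RX time = 1,
 * RX pkts = 2, CM = 1" annotation. Only entries 0 and MIT_TABLE are
 * actually used.
 */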
#endif

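/* tp->cur_rx and tp->dirty_rx are free-running counters: dirty_rx trails
 * cur_rx by the number of ring slots that still need a fresh skb, and
 * each index is reduced mod RX_RING_SIZE on use. Refilling stops early if
 * dev_alloc_skb() fails; the caller is expected to notice the hole
 * (skb == NULL at dirty_rx) and retry later, e.g. from the oom timer.
 */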
int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}

#ifdef CONFIG_TULIP_NAPI

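/* Fires when tulip_poll() bailed out with an empty RX ring (the oom:
 * path below). RX interrupts stay masked; this timer simply reschedules
 * NAPI so the poll routine can retry the refill one jiffy later.
 */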
void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        napi_schedule(&tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* One buffer is needed for mitigation activation; or it might be a
   bug in the ring buffer code; check later -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif
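
        /* NAPI contract: consume at most 'budget' packets here. Returning
         * a value < budget together with napi_complete() hands polling
         * back to the interrupt path; returning exactly 'budget' (via
         * not_done) keeps this device on the poll list.
         */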

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_poll(), entry %d %08x\n",
                       entry, tp->rx_ring[entry].status);
        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                        short pkt_len;

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_poll(), entry %d %08x\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;

                        /*
                         * Omit the four octet CRC from the length.
                         * (May not be considered valid until we have
                         * checked status for RxLengthOver2047 bits)
                         */
                        pkt_len = ((status >> 16) & 0x7ff) - 4;

                        /*
                         * Maximum pkt_len is 1518 (1514 + vlan header)
                         * Anything higher than this is always invalid
                         * regardless of RxLengthOver2047 bits
                         */

                        if ((status & (RxLengthOver2047 |
                                       RxDescCRCError |
                                       RxDescCollisionSeen |
                                       RxDescRunt |
                                       RxDescDescErr |
                                       RxWholePkt)) != RxWholePkt ||
                            pkt_len > 1518) {
                                if ((status & (RxLengthOver2047 |
                                               RxWholePkt)) != RxWholePkt) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        dev_warn(&dev->dev,
                                                                 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                                 status);
                                                dev->stats.rx_length_errors++;
                                        }
                                } else {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
                                                       dev->name, status);
                                        dev->stats.rx_errors++; /* end of a packet.*/
                                        if (pkt_len > 1518 ||
                                            (status & RxDescRunt))
                                                dev->stats.rx_length_errors++;

                                        if (status & 0x0004)
                                                dev->stats.rx_frame_errors++;
                                        if (status & 0x0002)
                                                dev->stats.rx_crc_errors++;
                                        if (status & 0x0001)
                                                dev->stats.rx_fifo_errors++;
                                }
                        } else {
                                struct sk_buff *skb;

                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak &&
                                    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                dev_err(&dev->dev,
                                                        "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                        le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                        (unsigned long long)tp->rx_buffers[entry].mapping,
                                                        skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy... the irq handler no longer acks Rx;
                   hopefully this helps. */

                /* Really bad things can happen here... If a new packet
                 * arrives and an irq arrives (tx or just due to an
                 * occasionally unset mask), it will be acked by the irq
                 * handler, but the new poll thread is not scheduled. It is
                 * a major hole in the design. No idea how to fix this if
                 * "playing with fire" fails tomorrow (night 011029). If it
                 * does not fail, we have won finally: the amount of IO did
                 * not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: We only use the min and max (0, 15) settings from mit_table */


        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        napi_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with dis/enabling irqs in other poll threads
         * 3. if an irq is raised after the loop begins, it will be immediately
         *    triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too-late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies+1);

        /* Note: checking timer_pending() here would be an explicit sign
         * of a bug. The timer can be pending now, yet have fired and
         * completed before we did napi_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        napi_complete(napi);

        return work_done;
}

#else /* CONFIG_TULIP_NAPI */

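/* Non-NAPI receive path. This runs from tulip_interrupt() itself, which
 * has already acked the RX interrupt sources, so packets are handed to
 * the stack with netif_rx() (deferred to the softirq backlog) rather
 * than netif_receive_skb() as in the NAPI poll routine above.
 */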
static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
                       entry, tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                short pkt_len;

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
                               dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;

                /*
                  Omit the four octet CRC from the length.
                  (May not be considered valid until we have
                  checked status for RxLengthOver2047 bits)
                */
                pkt_len = ((status >> 16) & 0x7ff) - 4;
                /*
                  Maximum pkt_len is 1518 (1514 + vlan header)
                  Anything higher than this is always invalid
                  regardless of RxLengthOver2047 bits
                */

                if ((status & (RxLengthOver2047 |
                               RxDescCRCError |
                               RxDescCollisionSeen |
                               RxDescRunt |
                               RxDescDescErr |
                               RxWholePkt)) != RxWholePkt ||
                    pkt_len > 1518) {
                        if ((status & (RxLengthOver2047 |
                                       RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                dev_warn(&dev->dev,
                                                         "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                         status);
                                        dev->stats.rx_length_errors++;
                                }
                        } else {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
                                               dev->name, status);
                                dev->stats.rx_errors++; /* end of a packet.*/
                                if (pkt_len > 1518 ||
                                    (status & RxDescRunt))
                                        dev->stats.rx_length_errors++;
                                if (status & 0x0004)
                                        dev->stats.rx_frame_errors++;
                                if (status & 0x0002)
                                        dev->stats.rx_crc_errors++;
                                if (status & 0x0001)
                                        dev->stats.rx_fifo_errors++;
                        }
                } else {
                        struct sk_buff *skb;

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        dev_err(&dev->dev,
                                                "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                (unsigned long long)tp->rx_buffers[entry].mapping,
                                                skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

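/* On HPPA boards the PHY interrupt is routed through the chip. CSR12 is
 * read and compared against a shadow copy so only genuine link changes
 * are handled: the duplex check runs under tp->lock, and bit 0x02 acks
 * the interrupt. Everywhere else this compiles to a no-op returning 0.
 */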
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

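        /* Neither the normal nor the abnormal interrupt summary bit is
         * set: the interrupt is not ours. Returning IRQ_NONE here (when
         * phy_interrupt() handled nothing) lets the shared-IRQ code try
         * the other handlers registered on this line.
         */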
        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to the poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
                        napi_schedule(&tp->napi);

                        if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking. */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
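
                /* 0x0001ff3f leaves bits 6 (RxIntr) and 7 (RxNoBuf) of
                 * CSR5 unacked; tulip_poll() writes those two bits itself
                 * at the top of its loop. The non-NAPI path below acks
                 * the full 0x0001ffff instead.
                 */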

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));

                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
                                                       dev->name, status);
#endif
                                        dev->stats.tx_errors++;
                                        if (status & 0x4104)
                                                dev->stats.tx_aborted_errors++;
                                        if (status & 0x0C00)
                                                dev->stats.tx_carrier_errors++;
                                        if (status & 0x0200)
                                                dev->stats.tx_window_errors++;
                                        if (status & 0x0002)
                                                dev->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                dev->stats.tx_heartbeat_errors++;
                                } else {
                                        dev->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        dev->stats.collisions += (status >> 3) & 15;
                                        dev->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                dev_err(&dev->dev,
                                        "Out-of-sync dirty pointer, %d vs. %d\n",
                                        dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        dev_warn(&dev->dev,
                                                 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
                                                 csr5, ioread32(ioaddr + CSR6),
                                                 tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber)
                                dev->stats.tx_errors++;
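
                        /* On Tx FIFO underflow the threshold in CSR6
                         * bits 15:14 is raised one step at a time; once
                         * it is already at the maximum (0xC000) the
                         * driver falls back to store-and-forward mode
                         * (CSR6 bit 21, 0x00200000) instead.
                         */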
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                dev->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                dev_err(&dev->dev,
                                        "(%lu) System Error occurred (%d)\n",
                                        tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                dev_err(&dev->dev,
                                        "Re-enabling interrupts, %08x\n",
                                        csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
                                         csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError )) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        dev_warn(&dev->dev,
                                 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                                 tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        dev_warn(&dev->dev,
                                                 "in rx suspend mode: (%lu) set timer\n",
                                                 tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

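        /* CSR8 is the missed-frames counter: the low 16 bits count frames
         * the chip dropped for lack of descriptors, and bit 16 appears to
         * be a counter-overflow flag, hence the clamp to 0x10000 below.
         */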
        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
                       dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}