1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/pci.h>
17#include "tulip.h"
18#include <linux/etherdevice.h>
19
/* Frames shorter than this are copied into a freshly allocated skb instead
 * of handing up the ring buffer (see tulip_rx()/tulip_poll()).
 * NOTE(review): initialized elsewhere — presumably a module parameter in
 * tulip_core; confirm. */
int tulip_rx_copybreak;
/* Upper bound on events handled per invocation of tulip_interrupt().
 * NOTE(review): initialized elsewhere — presumably a module parameter. */
unsigned int tulip_max_interrupt_work;
22
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* Hardware interrupt-mitigation values programmed into CSR11 by
 * tulip_poll().  Only two settings are actually written: 0 (mitigation
 * off) and mit_table[MIT_TABLE] (full mitigation).  The intermediate
 * entries appear to be graduated settings that are currently
 * unreferenced — NOTE(review): confirm before pruning. */
#define MIT_SIZE 15
#define MIT_TABLE 15	/* index of the single setting used: 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	0x0,		/* entry 0: mitigation disabled */
	0x80150000,
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
	/* entry MIT_TABLE: value written to CSR11 while mit_on */
	0x80F10000
};
#endif
60
61
62int tulip_refill_rx(struct net_device *dev)
63{
64 struct tulip_private *tp = netdev_priv(dev);
65 int entry;
66 int refilled = 0;
67
68
69 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
70 entry = tp->dirty_rx % RX_RING_SIZE;
71 if (tp->rx_buffers[entry].skb == NULL) {
72 struct sk_buff *skb;
73 dma_addr_t mapping;
74
75 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
76 if (skb == NULL)
77 break;
78
79 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
80 PCI_DMA_FROMDEVICE);
81 tp->rx_buffers[entry].mapping = mapping;
82
83 skb->dev = dev;
84 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
85 refilled++;
86 }
87 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
88 }
89 if(tp->chip_id == LC82C168) {
90 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
91
92
93
94 iowrite32(0x01, tp->base_addr + CSR2);
95 }
96 }
97 return refilled;
98}
99
100#ifdef CONFIG_TULIP_NAPI
101
102void oom_timer(unsigned long data)
103{
104 struct net_device *dev = (struct net_device *)data;
105 struct tulip_private *tp = netdev_priv(dev);
106 napi_schedule(&tp->napi);
107}
108
/*
 * NAPI poll handler: drain the receive ring, handing good frames up the
 * stack, until the ring is empty or @budget packets have been handled.
 * Refills the ring as it drains; on allocation failure, arms oom_timer
 * to retry.  Returns the number of packets processed.
 */
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	/* NOTE(review): reserves one unit of budget when it covers the
	 * whole ring — presumably needed for the mitigation logic below;
	 * confirm before changing. */
	if (budget >=RX_RING_SIZE) budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);

	do {
		/* An all-ones CSR5 read means the chip no longer responds
		 * (e.g. surprise removal) — give up. */
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge the current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* Process every descriptor the chip has handed back. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			/* Entire ring consumed — stop. */
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			/* Descriptor frame length minus the 4-byte FCS. */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/* A good frame has exactly RxWholePkt set among these
			 * bits and fits a standard Ethernet frame. */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Frame spanned multiple descriptors. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* Genuine receive error: classify it. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					dev->stats.rx_errors++;
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Short frame: copy into a fresh skb and leave
				 * the ring buffer mapped for reuse. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* align IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					/* Hand the ring skb itself up the stack. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					/* Ring descriptor and driver bookkeeping
					 * must agree on the DMA address. */
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			/* Top the ring up once a quarter of it has drained. */
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* Keep going while the chip still signals RX interrupts. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	/* Hardware-assisted mitigation: if this poll saw a burst (>1
	 * packet), throttle the interrupt rate via CSR11; otherwise turn
	 * mitigation off again for low latency. */
	if( tp->flags & HAS_INTR_MITIGATION) {
		if( received > 1 ) {
			if( ! tp->mit_on ) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		}
		else {
			if( tp->mit_on ) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif

	tulip_refill_rx(dev);

	/* Ring could not be refilled at all — go handle out-of-memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Ring drained within budget: leave polled mode and re-enable the
	 * chip's interrupts. */
	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	return work_done;

 not_done:
	/* Budget exhausted: refill if the ring is half-drained or empty,
	 * and stay in polled mode (no napi_complete). */
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:
	/* skb allocation failed: retry refilling from a timer very soon. */
	mod_timer(&tp->oom_timer, jiffies+1);

	napi_complete(napi);

	return work_done;
}
359
360#else
361
362static int tulip_rx(struct net_device *dev)
363{
364 struct tulip_private *tp = netdev_priv(dev);
365 int entry = tp->cur_rx % RX_RING_SIZE;
366 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
367 int received = 0;
368
369 if (tulip_debug > 4)
370 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
371 entry, tp->rx_ring[entry].status);
372
373 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
374 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
375 short pkt_len;
376
377 if (tulip_debug > 5)
378 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
379 dev->name, entry, status);
380 if (--rx_work_limit < 0)
381 break;
382
383
384
385
386
387
388 pkt_len = ((status >> 16) & 0x7ff) - 4;
389
390
391
392
393
394
395 if ((status & (RxLengthOver2047 |
396 RxDescCRCError |
397 RxDescCollisionSeen |
398 RxDescRunt |
399 RxDescDescErr |
400 RxWholePkt)) != RxWholePkt ||
401 pkt_len > 1518) {
402 if ((status & (RxLengthOver2047 |
403 RxWholePkt)) != RxWholePkt) {
404
405 if ((status & 0xffff) != 0x7fff) {
406 if (tulip_debug > 1)
407 dev_warn(&dev->dev,
408 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
409 status);
410 dev->stats.rx_length_errors++;
411 }
412 } else {
413
414 if (tulip_debug > 2)
415 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
416 dev->name, status);
417 dev->stats.rx_errors++;
418 if (pkt_len > 1518 ||
419 (status & RxDescRunt))
420 dev->stats.rx_length_errors++;
421 if (status & 0x0004)
422 dev->stats.rx_frame_errors++;
423 if (status & 0x0002)
424 dev->stats.rx_crc_errors++;
425 if (status & 0x0001)
426 dev->stats.rx_fifo_errors++;
427 }
428 } else {
429 struct sk_buff *skb;
430
431
432
433 if (pkt_len < tulip_rx_copybreak &&
434 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
435 skb_reserve(skb, 2);
436 pci_dma_sync_single_for_cpu(tp->pdev,
437 tp->rx_buffers[entry].mapping,
438 pkt_len, PCI_DMA_FROMDEVICE);
439#if ! defined(__alpha__)
440 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
441 pkt_len);
442 skb_put(skb, pkt_len);
443#else
444 memcpy(skb_put(skb, pkt_len),
445 tp->rx_buffers[entry].skb->data,
446 pkt_len);
447#endif
448 pci_dma_sync_single_for_device(tp->pdev,
449 tp->rx_buffers[entry].mapping,
450 pkt_len, PCI_DMA_FROMDEVICE);
451 } else {
452 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
453 pkt_len);
454
455#ifndef final_version
456 if (tp->rx_buffers[entry].mapping !=
457 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
458 dev_err(&dev->dev,
459 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
460 le32_to_cpu(tp->rx_ring[entry].buffer1),
461 (long long)tp->rx_buffers[entry].mapping,
462 skb->head, temp);
463 }
464#endif
465
466 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
467 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
468
469 tp->rx_buffers[entry].skb = NULL;
470 tp->rx_buffers[entry].mapping = 0;
471 }
472 skb->protocol = eth_type_trans(skb, dev);
473
474 netif_rx(skb);
475
476 dev->stats.rx_packets++;
477 dev->stats.rx_bytes += pkt_len;
478 }
479 received++;
480 entry = (++tp->cur_rx) % RX_RING_SIZE;
481 }
482 return received;
483}
484#endif
485
/*
 * Check for (and handle) a PHY status change.  Only compiled-in on HPPA,
 * where the PHY event is reflected in the low byte of CSR12; elsewhere
 * this is a no-op.  Returns 1 if an event was handled, 0 otherwise.
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt (set bit 0x02) and remember the new state */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;

		/* re-evaluate duplex under the driver lock */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);

		/* clear the irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
509
510
511
/*
 * Main interrupt handler: reaps completed TX descriptors, dispatches RX
 * work (to NAPI when configured, directly to tulip_rx() otherwise), and
 * handles abnormal/error conditions.  Loops until CSR5 shows no pending
 * sources or the work limit (tulip_max_interrupt_work) is reached.
 */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;		/* count of "other" (abnormal/timer) events */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;		/* set once RX has been handed off to NAPI */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Is this interrupt really for us? */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI
		/* First RX event: mask RX interrupt sources and hand the
		 * ring to the NAPI poll routine. */
		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs at the chip */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			/* Nothing but RX/link events pending — done here. */
			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge all sources except RX (now owned by poll). */
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		/* Handle RX inline in the non-NAPI build. */
		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		/* Reap completed transmit descriptors. */
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* still owned by the chip */

				/* Slots without an skb carry setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* dummy frames have no mapping */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* Major transmit error — classify. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* Wake the queue once at least two slots are free. */
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Handle the abnormal-interrupt summary. */
		if (csr5 & AbnormalIntr) {
			if (csr5 == 0xffffffff)
				break;	/* chip gone (all-ones read) */
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* raise TX threshold */
				else
					tp->csr6 |= 0x00200000;	/* store-and-forward */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				/* Comet keeps its MC filter in MMIO regs. */
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* receiver stopped */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}

			/* Link state changed.  NOTE(review): 0x08000000
			 * appears to be a chip-specific link event bit —
			 * confirm against the datasheet. */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				/* error code is CSR5 bits 25:23 */
				int error = (csr5 >> 23) & 7;

				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources. */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			/* Mitigation timer fired: re-enable all interrupts. */
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			/* Too much work this interrupt: back off. */
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Use hardware mitigation via CSR11. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* No mitigation on the PNIC: mask interrupts
				 * and re-enable from a timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask the busy sources; set the on-chip timer
				 * to re-enable them via TimerInt. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		/* RX is NAPI's problem now — ignore its sources. */
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* abnormal sources: */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* Check whether the receiver is starved of buffers ("rx suspend"). */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			/* Arm the on-chip timer once so we come back and
			 * retry the refill via TimerInt. */
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	/* Accumulate frames the chip counted as missed (CSR8). */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}
812