#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15

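/*
 * Values written to the CSR11 interrupt-mitigation register.  Only the
 * last entry (index MIT_TABLE) is used below: the poll loop toggles
 * between mitigation off (0) and mit_table[MIT_TABLE] depending on the
 * recent Rx load.
 */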
static unsigned int mit_table[MIT_SIZE+1] =
{
	0x0,
	0x80150000,
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
	0x80F10000
};
#endif

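/*
 * Refill the Rx ring with fresh skbuffs for every descriptor the chip has
 * handed back, and return ownership of each descriptor to the hardware.
 * Returns the number of buffers that were (re)allocated.
 */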
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to lack of buffers; issue a receive
			   poll demand to restart it. */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI

/* The Rx ring could not be refilled (out of memory); this timer fires to
 * reschedule NAPI polling so the refill can be retried. */
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	/* Keep one ring slot in reserve while hardware mitigation is in use. */
	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, " In tulip_poll(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}

		/* Acknowledge the current Rx interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next descriptor, a new packet has arrived. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
					   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/* Frame length from the status word, minus the 4-byte CRC. */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/* Accept only frames that fit in one descriptor, completed
			   without errors, and are no longer than a legal Ethernet
			   frame. */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore the earlier buffers of the oversized
					   frame so it is only counted once. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						netdev_dbg(dev, "Receive error, Rx status %08x\n",
							   status);
					dev->stats.rx_errors++;
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept
				   without copying to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* Align the IP header. */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* Rx interrupts are acknowledged here in the poll loop rather
		   than in the interrupt handler; keep going while the chip
		   still reports pending Rx work. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
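	/* Simple hardware-mitigation heuristic: if more than one packet was
	   received in this poll, turn interrupt mitigation on; if only one
	   arrived, turn it back off.  Only the "off" (0) and the table's
	   last entry (mit_table[MIT_TABLE]) are ever used. */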
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif

	tulip_refill_rx(dev);

	/* If the Rx ring is not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and re-enable chip interrupts. */
	napi_complete(napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	return work_done;

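	/* Budget exhausted with Rx work still pending: top up the ring if it
	   is running low and stay on the poll list. */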
 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

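	/* Out of memory: no Rx skbuffs could be allocated.  Arm the oom
	   timer to retry the refill shortly and leave polling without
	   re-enabling Rx interrupts. */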
 oom:
	mod_timer(&tp->oom_timer, jiffies+1);

	napi_complete(napi);

	return work_done;
}

#else

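/*
 * Non-NAPI receive path: called from the interrupt handler to drain the
 * Rx ring.  Returns the number of frames passed up the stack.
 */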
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	/* If we own the next descriptor, a new packet has arrived. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/* Frame length from the status word, minus the 4-byte CRC. */
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/* Accept only frames that fit in one descriptor, completed
		   without errors, and are no longer than a legal Ethernet
		   frame. */
		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore the earlier buffers of the oversized
				   frame so it is only counted once. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				dev->stats.rx_errors++;
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align the IP header. */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

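/*
 * Handle a PHY interrupt reported through CSR12 (used on builds, such as
 * __hppa__, where the PHY interrupt is routed this way): acknowledge the
 * change and re-check the duplex setting.
 */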
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* Acknowledge the interrupt. */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;

		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);

		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

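/* The interrupt handler does all of the Rx thread work (in the non-NAPI
   case) and cleans up after the Tx thread. */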
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us. */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

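	/* Service the chip until no interesting interrupt sources remain or
	   the work limit is hit. */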
	do {

#ifdef CONFIG_TULIP_NAPI
		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask Rx interrupts and hand Rx work to the NAPI poll loop. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources handled here; the poll
		   function acknowledges RxIntr and RxNoBuf itself. */
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));

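		/* Reclaim completed Tx descriptors and free their skbs. */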
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It has not been transmitted yet. */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* Test because dummy frames are not mapped. */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

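		/* Log and recover from abnormal interrupt events. */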
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold. */
				else
					tp->csr6 |= 0x00200000;	/* Store-and-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed an Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}

			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* A PCI bus error was seen; bits 25:23 of CSR5
				   give the cause. */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones. */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Use hardware mitigation to throttle further interrupts. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* The PNIC has no hardware mitigation: mask interrupts
				   and poll again from the timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources and set the timer to
				   re-enable them. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* Check whether the chip has entered receive-suspend mode because
	   the ring ran out of buffers. */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

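	/* Pick up the count of frames the chip reported as missed (CSR8). */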
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}