1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/pci.h>
14#include "tulip.h"
15#include <linux/etherdevice.h>
16
/* Received packets shorter than this are copied into a freshly allocated
 * skb instead of handing the ring buffer up the stack (see the copybreak
 * tests in tulip_rx()/tulip_poll()).  Defined here, set elsewhere --
 * presumably a module parameter; TODO confirm against tulip_core. */
int tulip_rx_copybreak;
/* Upper bound on events handled per tulip_interrupt() invocation; counted
 * down in its event loop.  Set elsewhere -- presumably a module parameter. */
unsigned int tulip_max_interrupt_work;
19
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* Hardware interrupt-mitigation values written to CSR11 by tulip_poll().
 * Only the last entry (index MIT_TABLE) is used in this file, when more
 * than one packet was received in a poll cycle. */
#define MIT_SIZE 15
#define MIT_TABLE 15 /* index of the single entry actually used here */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* NOTE(review): the values appear to pack the 21143 CSR11
	 * mitigation fields (timer/counter settings in the high bits,
	 * 0x8000... enabling mitigation) -- confirm against the chip
	 * databook before changing any entry. */
	0x0,
	0x80150000,
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
	/* 0x80FF0000      too busy */
	0x80F10000
};
#endif
57
58
/*
 * Refill the RX ring: allocate and DMA-map a new skb for every ring slot
 * whose buffer was consumed, then give each refilled descriptor back to
 * the chip by setting DescOwned.  Returns the number of buffers refilled
 * (0 if nothing was needed or allocation failed immediately).
 * Called from tulip_poll()/tulip_interrupt() context.
 */
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Walk from dirty_rx up to cur_rx, replacing missing buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;	/* OOM: caller's oom_timer path retries later */

			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		/* Hand the descriptor (back) to the chip last, after the
		 * buffer pointer is in place. */
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if(tp->chip_id == LC82C168) {
		if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* NOTE(review): CSR5 bits 19:17 == 4 appears to mean
			 * the RX engine is suspended; writing CSR2 issues a
			 * receive poll demand to restart it -- confirm
			 * against the PNIC (LC82C168) documentation. */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
102
103#ifdef CONFIG_TULIP_NAPI
104
105void oom_timer(struct timer_list *t)
106{
107 struct tulip_private *tp = from_timer(tp, t, oom_timer);
108
109 napi_schedule(&tp->napi);
110}
111
/*
 * NAPI poll handler: drain completed RX descriptors up to @budget,
 * deliver good frames via netif_receive_skb(), account errors, refill
 * the ring, and manage hardware interrupt mitigation (CSR11) when
 * configured.  Returns the number of packets processed; re-enables RX
 * interrupts (CSR7) only when the ring was fully drained.
 */
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	/* NOTE(review): with hardware mitigation, one unit of budget is
	 * held back when budget covers the whole ring -- presumably so the
	 * mitigation logic below still gets a not-done exit; confirm
	 * against the original driver history. */
	if (budget >=RX_RING_SIZE) budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	do {
		/* All-ones from CSR5 means the device is gone (hot-unplug). */
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge the RX interrupt sources we are about to service. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* Process all descriptors the chip has handed back to us. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			/* Stop if every ring entry is already outstanding. */
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
					   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/* Frame length from status bits 26:16, minus the
			 * 4-byte CRC appended by the chip. */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/* A good frame must have exactly RxWholePkt set among
			 * the error/whole-packet bits, and a sane length. */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Frame spans multiple descriptors. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* Single-buffer frame with error bits set. */
					if (tulip_debug > 2)
						netdev_dbg(dev, "Receive error, Rx status %08x\n",
							   status);
					dev->stats.rx_errors++;
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					/* Low status bits: 0x4 frame, 0x2 CRC,
					 * 0x1 FIFO overflow (per counters below). */
					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Copybreak: for short packets, copy into a
				 * small fresh skb and leave the (synced, still
				 * mapped) ring buffer in place. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16-byte align the IP header */
					dma_sync_single_for_cpu(&tp->pdev->dev,
								tp->rx_buffers[entry].mapping,
								pkt_len,
								DMA_FROM_DEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					/* Alpha: put-then-copy variant. */
					skb_put_data(skb,
						     tp->rx_buffers[entry].skb->data,
						     pkt_len);
#endif
					dma_sync_single_for_device(&tp->pdev->dev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len,
								   DMA_FROM_DEVICE);
				} else {
					/* Pass the ring buffer itself upstream;
					 * refill will allocate a replacement. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					/* Debug build: catch ring/driver bookkeeping skew. */
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					dma_unmap_single(&tp->pdev->dev,
							 tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ,
							 DMA_FROM_DEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			/* Refill eagerly once a quarter of the ring is consumed. */
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

	/* Loop while the chip still reports pending RX interrupts. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	/* Simple hysteresis: enable hardware mitigation (CSR11) when this
	 * pass handled more than one packet, disable it when traffic is
	 * light, changing the register only on transitions. */
	if( tp->flags & HAS_INTR_MITIGATION) {
		if( received > 1 ) {
			if( ! tp->mit_on ) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		}
		else {
			if( tp->mit_on ) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif

	tulip_refill_rx(dev);

	/* Refill could not allocate even one buffer: fall into OOM handling. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Ring drained within budget: leave polling mode and unmask the
	 * chip's interrupts again. */
	napi_complete_done(napi, work_done);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	return work_done;

 not_done:
	/* Budget exhausted: top up the ring if it is half consumed or
	 * missing buffers, but stay in polling mode. */
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:
	/* Out of memory: retry the refill from oom_timer() on the next tick. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Leave polling mode; note RX interrupts (CSR7) are deliberately
	 * not re-enabled here -- the oom timer reschedules NAPI instead. */
	napi_complete_done(napi, work_done);

	return work_done;
}
366
367#else
368
/*
 * Non-NAPI RX path, called from tulip_interrupt(): drain completed RX
 * descriptors (bounded by the number of outstanding ring entries),
 * deliver good frames via netif_rx(), and account errors.  Returns the
 * number of packets received.  Mirrors the descriptor handling in
 * tulip_poll() above.
 */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	/* At most one pass over the entries currently owned by the driver. */
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	/* Process all descriptors the chip has handed back to us. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/* Frame length from status bits 26:16, minus the 4-byte CRC. */
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/* A good frame must have exactly RxWholePkt set among the
		 * error/whole-packet bits, and a sane length. */
		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Frame spans multiple descriptors. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* Single-buffer frame with error bits set. */
				if (tulip_debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				dev->stats.rx_errors++;
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				/* Low status bits: 0x4 frame, 0x2 CRC,
				 * 0x1 FIFO overflow (per counters below). */
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Copybreak: for short packets, copy into a small
			 * fresh skb and leave the ring buffer mapped. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16-byte align the IP header */
				dma_sync_single_for_cpu(&tp->pdev->dev,
							tp->rx_buffers[entry].mapping,
							pkt_len,
							DMA_FROM_DEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				/* Alpha: put-then-copy variant. */
				skb_put_data(skb,
					     tp->rx_buffers[entry].skb->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&tp->pdev->dev,
							   tp->rx_buffers[entry].mapping,
							   pkt_len,
							   DMA_FROM_DEVICE);
			} else {
				/* Pass the ring buffer itself upstream; refill
				 * will allocate a replacement. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				/* Debug build: catch ring/driver bookkeeping skew. */
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
494#endif
495
/*
 * Service a PHY-raised interrupt (only wired up on HP PA-RISC builds,
 * where the PHY interrupt is visible through CSR12).  Returns 1 if a
 * PHY event was handled, 0 otherwise.  Called from tulip_interrupt()
 * when the board has HAS_PHY_IRQ.
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack the interrupt: set bit 0x02, then clear it below.
		 * NOTE(review): exact CSR12 bit semantics are board
		 * specific -- confirm against the platform docs. */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;

		/* Link state may have changed: re-evaluate duplex under
		 * the driver lock. */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);

		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
519
520
521
/*
 * The interrupt handler: reads CSR5 for the interrupt causes and loops
 * servicing them until either nothing is pending or the per-call work
 * limits are hit.  RX is handed off to NAPI when CONFIG_TULIP_NAPI is
 * set, otherwise handled inline via tulip_rx(); TX completions are
 * reaped under tp->lock; abnormal conditions (FIFO underflow, stopped
 * receiver, link changes, system errors) trigger recovery actions.
 */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;	/* "other interrupt" (abnormal/timer) event count */
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;	/* nonzero once NAPI has been scheduled for RX */
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Snapshot the interrupt sources. */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	/* Not our interrupt (shared line). */
	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;	/* interrupt counter, used in diagnostics below */

	do {

#ifdef CONFIG_TULIP_NAPI
		/* First RX event: mask RX interrupt sources and let the
		 * NAPI poll loop do the receive work. */
		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs while polling */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			/* Nothing but RX/link events pending: done here. */
			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge all pending sources except the RX bits,
		 * which NAPI will ack itself. */
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif

		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));


		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			/* Reap completed TX descriptors. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* DescOwned: chip still owns it */

				/* No skb means this slot held a setup frame
				 * (or a dummy entry with no mapping). */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						dma_unmap_single(&tp->pdev->dev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 DMA_TO_DEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					/* Status bit groups, per the counters
					 * they feed below. */
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				dma_unmap_single(&tp->pdev->dev,
						 tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 DMA_TO_DEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* Ring has free slots again: restart the TX queue. */
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {
			if (csr5 == 0xffffffff)
				break;	/* all-ones: the card is gone */
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				/* Bump the TX threshold; when already at the
				 * max (0xC000), fall back to store-and-forward
				 * mode (CSR6 bit 0x00200000). */
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;
				else
					tp->csr6 |= 0x00200000;
				/* Restart the xmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					/* Comet: rewrite the multicast filter
					 * registers on RX trouble. */
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}

			/* Link change events (plus chip-specific bit
			 * 0x08000000) go to the board's handler. */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* CSR5 bits 25:23: bus error code from the
				 * chip; value meanings are chip-documented. */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			/* The watchdog timer fired: re-enable the full
			 * interrupt mask. */
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Throttle interrupts via CSR11 mitigation
				 * rather than masking them entirely. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* PNIC: mask everything and let the timer
				 * routine pick up the slack shortly. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask the pending-but-unserviced sources and
				 * arm the chip's own timer to re-enable them. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;	/* RX is NAPI's job now */
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr summary: */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			/* Arm the chip's general-purpose timer (CSR11) so a
			 * TimerInt wakes us to retry the refill. */
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif

	/* Fold the chip's missed-frame counter (CSR8) into the stats. */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}
823