1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/* Driver version banner, printed once at first successful probe when lance_debug > 0. */
static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/string.h>
50#include <linux/delay.h>
51#include <linux/errno.h>
52#include <linux/ioport.h>
53#include <linux/slab.h>
54#include <linux/interrupt.h>
55#include <linux/pci.h>
56#include <linux/init.h>
57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
59#include <linux/skbuff.h>
60#include <linux/mm.h>
61#include <linux/bitops.h>
62
63#include <asm/io.h>
64#include <asm/dma.h>
65
/* I/O base addresses to scan during autoprobe; zero-terminated. */
static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);
69
70
/*
 * Card signature bytes read from offsets 14/15 of the station-address
 * PROM; used by do_lance_probe() to recognize a supported board before
 * running the full chip probe.
 */
static struct card {
	char id_offset14;	/* byte at ioaddr + 14 */
	char id_offset15;	/* byte at ioaddr + 15 */
} cards[] = {
	{	/* 0x57 0x57 == 'W' 'W' */
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	/* 0x52 0x44 == 'R' 'D' */
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	/* 0x52 0x49 == 'R' 'I' */
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
/* Derive the count from the table itself so a new entry cannot
 * desynchronize the probe loops (was a hard-coded 3). Cast keeps the
 * signed `int card` comparisons warning-free. */
#define NUM_CARDS ((int)(sizeof(cards) / sizeof(cards[0])))
89
/* Message verbosity (0-7); compile-time default overridable via LANCE_DEBUG,
 * and at runtime via the lance_debug module parameter. */
#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/* Ring sizes are powers of two; the defaults give 16 Tx and 16 Rx
 * descriptors.  The *_LEN_BITS value places log2(size) in the top bits
 * of the init block's ring-address word, as the chip expects. */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)

/* Per-packet buffer size: room for a maximal Ethernet frame. */
#define PKT_BUF_SZ 1544

/* Register offsets from the I/O base. */
#define LANCE_DATA 0x10		/* RDP: register data port */
#define LANCE_ADDR 0x12		/* RAP: register address (select) port */
#define LANCE_RESET 0x14	/* reading this port resets the chip (see lance_open) */
#define LANCE_BUS_IF 0x16	/* bus-interface register (autoselect bit etc.) */
#define LANCE_TOTAL_SIZE 0x18	/* size of the I/O region to reserve */

/* Tx watchdog timeout handed to the netdev core (jiffies). */
#define TX_TIMEOUT 20
211
212
/* Rx ring descriptor, laid out exactly as the chip reads it: the top
 * byte of `base` carries the status bits, including the OWN (sign) bit. */
struct lance_rx_head {
	s32 base;		/* 24-bit buffer bus address + status byte */
	s16 buf_length;		/* buffer size, stored as a negative value */
	s16 msg_length;		/* actual received frame length */
};

/* Tx ring descriptor; same base/status layout as the Rx descriptor. */
struct lance_tx_head {
	s32 base;		/* 24-bit buffer bus address + status byte */
	s16 length;		/* frame length, stored as a negative value */
	s16 misc;		/* error summary bits after transmission */
};

/* The LANCE initialization block, fetched by the chip at INIT time. */
struct lance_init_block {
	u16 mode;		/* operating mode (0x0003 = Rx+Tx disabled) */
	u8 phys_addr[6];	/* station MAC address */
	u32 filter[2];		/* 64-bit logical-address (multicast) filter */
	/* Ring bases: 24-bit bus address plus the *_RING_LEN_BITS code. */
	u32 rx_ring;
	u32 tx_ring;
};
234
/* Per-device state.  The whole struct is allocated with GFP_DMA in
 * lance_probe1() so the rings (which the chip DMAs) sit in ISA-reachable
 * memory; they are placed first in the struct. */
struct lance_private {
	/* Rx and Tx descriptor rings, read/written by the chip. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block init_block;
	const char *name;			/* chip name from chip_table */
	/* skbs transmitted in place; freed by the interrupt handler. */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* skbs receiving in place (may be NULL if alloc_skb failed). */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;			/* fallback Rx buffer block (kmalloc'd) */
	/* Low-memory Tx bounce buffers; NULL when all RAM is below 16MB. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;			/* next ring entry to use */
	int dirty_rx, dirty_tx;			/* oldest entry not yet reclaimed */
	int dma;				/* ISA DMA channel (4 = none needed) */
	struct net_device_stats stats;
	unsigned char chip_version;		/* index into chip_table[] */
	spinlock_t devlock;			/* guards ring state vs. the IRQ handler */
};
255
/* Per-chip quirk/capability flags used in chip_table[].flags. */
#define LANCE_MUST_PAD          0x00000001	/* chip won't pad short frames itself */
#define LANCE_ENABLE_AUTOSELECT 0x00000002	/* supports media auto-select (BUS_IF bit 1) */
#define LANCE_MUST_REINIT_RING  0x00000004	/* rings must be rebuilt on every restart */
#define LANCE_MUST_UNRESET      0x00000008	/* needs an explicit un-reset write */
#define LANCE_HAS_MISSED_FRAME  0x00000010	/* has a missed-frame counter (CSR 112) */
261
262
263
264
/*
 * Map the part ID (read from CSR88/89 in lance_probe1) to a chip name
 * and quirk flags.  Index 0 is the ID-less original LANCE; the final
 * zero-ID entry is the catch-all for unrecognized PCnet parts.
 */
static struct lance_chip_type {
	int id_number;		/* part ID; 0 terminates the search loop */
	const char *name;
	int flags;		/* LANCE_* quirk bits */
} chip_table[] = {
	{0x0000, "LANCE 7990",
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* 0x2430 is reached for VLB parts (see PCNET_VLB below) and is
	 * reported generically as "PCnet32". */
	{0x2430, "PCnet32",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0, "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

/* Symbolic indices into chip_table[]. */
enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
295
296
297
298
/* Nonzero while Tx packets above the 16MB ISA DMA limit must be copied
 * into low-memory bounce buffers; cleared in do_lance_probe() when all
 * system RAM is below 16MB. */
static unsigned char lance_need_isa_bounce_buffers = 1;

/* Driver entry points (netdev callbacks and helpers). */
static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev);
311
312
313
#ifdef MODULE
#define MAX_CARDS 8	/* max interfaces (cards) supported per module load */

static struct net_device *dev_lance[MAX_CARDS];	/* successfully probed devices */
static int io[MAX_CARDS];			/* io= parameter, required */
static int dma[MAX_CARDS];			/* dma= parameter, optional */
static int irq[MAX_CARDS];			/* irq= parameter, optional */

module_param_array(io, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
330
/*
 * Module entry point: probe one card per supplied io= value.
 * Autoprobing is refused in module mode, so io[0] is mandatory.
 * Returns 0 if at least one card was registered, -EPERM when io= is
 * missing, -ENXIO when no card was found.
 */
int __init init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		if (io[this_dev] == 0) {
			/* First empty slot ends the list; an empty list is an error. */
			if (this_dev != 0)
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}
		dev = alloc_etherdev(0);
		if (!dev)
			break;
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->dma = dma[this_dev];
		if (do_lance_probe(dev) == 0) {
			dev_lance[found++] = dev;
			continue;
		}
		/* Probe failed: drop this device and stop scanning. */
		free_netdev(dev);
		break;
	}
	if (found != 0)
		return 0;
	return -ENXIO;
}
360
/* Release everything lance_probe1() acquired for one card: the ISA DMA
 * channel (unless 4, which means "none"), the I/O region, and the
 * DMA-able buffers plus the private struct itself. */
static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);	/* may be NULL; kfree(NULL) is a no-op */
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}
371
372void __exit cleanup_module(void)
373{
374 int this_dev;
375
376 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
377 struct net_device *dev = dev_lance[this_dev];
378 if (dev) {
379 unregister_netdev(dev);
380 cleanup_card(dev);
381 free_netdev(dev);
382 }
383 }
384}
385#endif
MODULE_LICENSE("GPL");	/* applies in both module and built-in builds */
387
388
389
390
391
392
/*
 * Scan the known ISA ports for a recognizable card signature and run
 * lance_probe1() on each candidate.  Returns 0 once a device has been
 * found and registered, -ENODEV otherwise.  On success the reserved
 * probe region is kept and relabelled with the chip name.
 */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	/* If all RAM sits below the 16MB ISA DMA limit, Tx bouncing is unnecessary. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Cheap signature check: first PROM byte at offset 14... */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {
				/* ...then confirm with the byte at offset 15. */
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) {
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;
					/* Keep the region reserved, tagged with the chip name. */
					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
435
#ifndef MODULE
/*
 * Built-in probe entry point: allocate a net_device for unit @unit,
 * apply any boot-time "ether=" settings, then autoprobe.  Returns the
 * registered device or an ERR_PTR on failure.
 */
struct net_device * __init lance_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_lance_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif
457
/* netdev callbacks; MTU change, MAC setting and address validation use
 * the generic Ethernet helpers. */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open = lance_open,
	.ndo_start_xmit = lance_start_xmit,
	.ndo_stop = lance_close,
	.ndo_get_stats = lance_get_stats,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_tx_timeout = lance_tx_timeout,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
469
/*
 * Low-level probe: verify a LANCE/PCnet chip at @ioaddr, identify the
 * chip generation, work out IRQ and ISA DMA channel (by table, by
 * board-specific registers, or by auto-detection), allocate the private
 * state in ISA-DMA-capable memory and register the net device.
 *
 * @irq nonzero means the caller (a PCI-style path) already knows the
 * IRQ and no ISA DMA channel is needed.  @options is currently unused.
 * Returns 0 on success or a negative errno; on success dev->ml_priv
 * points at the new lance_private.
 */
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* snapshot of busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Boards that encode IRQ/DMA in hardware rather than needing probes: */
	unsigned char hpJ2405A = 0;	/* HP J2405A adapter */
	int hp_builtin = 0;		/* HP Vectra on-board LANCE: its config port */
	static int did_version;		/* print the version banner only once */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;

	/*
	 * Detect the HP Vectra on-board LANCE by the "HP" (0x5048)
	 * signature in the BIOS area; its IRQ/DMA come from a system
	 * configuration port (0x499 or 0x99) instead of being probed.
	 */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
	if (readw(bios + 0x12) == 0x5048) {
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
		/* Other boards may exist too -- confirm this really is the built-in. */
		if ((inb(hp_port) & 0xc0) == 0x80
		    && ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* Heuristic: the HP J2405A shows the HP OUI 08:00:09 at the PROM base. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
		    && inb(ioaddr+2) == 0x09);

	/* Reset the LANCE (reading the reset port triggers it). */
	reset_val = inw(ioaddr+LANCE_RESET);

	/* The un-reset write is needed only for a real NE2100 and would
	 * confuse the HP board, so skip it there. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	/* After reset, CSR0 must read 0x0004 (STOP); otherwise no chip here. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

	/* Identify the chip: newer PCnet parts expose a part ID in CSR88/89;
	 * on the old LANCE the RAP does not read back. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;	/* original LANCE 7990 */
	} else {
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk(" LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)	/* manufacturer code check */
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* The station address PROM occupies the first six I/O ports. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + i);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;

	/* GFP_DMA keeps the embedded rings within ISA DMA reach. */
	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if(lp==NULL)
		return -ENODEV;	/* NOTE(review): arguably should be -ENOMEM */
	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	/* Fallback Rx buffer block, used when skb allocation fails later. */
	lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
					      GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
					      GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	/* Build an initial init block with Rx/Tx disabled (mode 0x0003). */
	lp->init_block.mode = 0x0003;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

	/* Point the chip at the init block (address low half in CSR1, high
	 * half in CSR2), then select CSR0 again. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {			/* caller-supplied (PCI-style) config */
		dev->dma = 4;		/* bus-master: no ISA DMA channel */
		dev->irq = irq;
	} else if (hp_builtin) {
		/* HP Vectra: IRQ/DMA encoded in the configuration port. */
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		/* HP J2405A: IRQ/DMA encoded in the reset register. */
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {
		/* Plug-and-play part: read IRQ/DMA from the bus-interface register. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may have been passed in dev->mem_start. */
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Snapshot the DMA status registers so auto-detection below can
		 * skip channels that are already busy (enabling one would hang). */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0) {
		/* Auto-IRQ: trigger an initialization (CSR0 INIT|INEA) and see
		 * which interrupt line fires.  7990 boards need DMA set up first
		 * and are handled further below. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();

		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* If the init-done bit (0x0100) is already set, the chip reached
		 * memory without an ISA DMA channel -- it is a bus master. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {
		/* Auto-DMA: try each candidate channel in cascade mode and see
		 * whether the chip manages to fetch its init block. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Never enable a channel that is already in use. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* clear error/status bits */
			if (request_dma(dma, chipname))
				continue;

			flags=claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				/* Init-done: this channel works. */
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags=claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {		/* no candidate worked */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* 7990: now that DMA is set up, auto-IRQ via a triggered init. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk(" Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on media auto-select so the link LEDs work even before
		 * the interface is opened.  Only the autoselect bit is touched. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0 && did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}
748
749
/*
 * Bring the interface up: claim the IRQ, put the ISA DMA channel in
 * cascade mode, reset and re-initialize the chip, rebuild the rings and
 * start Rx/Tx.  Returns 0 on success, -EAGAIN if no IRQ is configured
 * or it cannot be claimed.
 */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
		return -EAGAIN;
	}

	/* The DMA channel itself was permanently allocated at probe time;
	 * here we only (re)configure it. */

	/* Reset the LANCE (reading the reset port triggers it). */
	inw(ioaddr+LANCE_RESET);

	/* dma == 4 means "bus master, no ISA DMA channel". */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Explicit un-reset, needed only by the original NE2100/7990. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Re-enable media auto-select; only that bit is touched. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
			   (u32) isa_virt_to_bus(lp->tx_ring),
			   (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);

	/* Load the init block address (low half in CSR1, high half in CSR2). */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	/* CSR4: interrupt mask / feature bits. */
	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	/* CSR0 INIT: start fetching the init block. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	/* Bounded wait for the init-done bit (0x0100) in CSR0. */
	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;

	/* Deliberately do NOT clear the init-done bit here (clearing it is
	 * known to misbehave on some PCnet revisions); just start Rx/Tx and
	 * enable interrupts. */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;
}
825
826
827
828
829
830
831
832
833
834
835
836
837
838static void
839lance_purge_ring(struct net_device *dev)
840{
841 struct lance_private *lp = dev->ml_priv;
842 int i;
843
844
845 for (i = 0; i < RX_RING_SIZE; i++) {
846 struct sk_buff *skb = lp->rx_skbuff[i];
847 lp->rx_skbuff[i] = NULL;
848 lp->rx_ring[i].base = 0;
849 if (skb)
850 dev_kfree_skb_any(skb);
851 }
852 for (i = 0; i < TX_RING_SIZE; i++) {
853 if (lp->tx_skbuff[i]) {
854 dev_kfree_skb_any(lp->tx_skbuff[i]);
855 lp->tx_skbuff[i] = NULL;
856 }
857 }
858}
859
860
861
/*
 * (Re)build the Rx/Tx rings and the init block.  Each Rx slot gets a
 * fresh skb, or a plain DMA-able buffer if skb allocation fails; Tx
 * slots start empty.  @gfp is GFP_KERNEL or GFP_ATOMIC depending on the
 * caller's context (open vs. restart from IRQ).
 */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb) {
			skb->dev = dev;
			rx_buff = skb->data;
		} else
			/* Fall back to a bare buffer; the Rx path copies into a
			 * fresh skb anyway. */
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;	/* slot unusable */
		else
			/* 0x80000000: OWN bit -- descriptor belongs to the chip. */
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;	/* negative, per chip convention */
	}
	/* Tx buffer addresses are filled in as packets arrive; just make
	 * sure the ownership bit is clear. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	/* Normal operating mode; filter cleared (no multicast accept). */
	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	/* 24-bit ring bus addresses with the log2-length code in the top bits. */
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}
903
904static void
905lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
906{
907 struct lance_private *lp = dev->ml_priv;
908
909 if (must_reinit ||
910 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
911 lance_purge_ring(dev);
912 lance_init_ring(dev, GFP_ATOMIC);
913 }
914 outw(0x0000, dev->base_addr + LANCE_ADDR);
915 outw(csr0_bits, dev->base_addr + LANCE_DATA);
916}
917
918
/*
 * Watchdog callback: the Tx queue stalled for more than TX_TIMEOUT.
 * Stop the chip, optionally dump the rings for debugging, then restart
 * with a forced ring rebuild and wake the queue.
 */
static void lance_tx_timeout (struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);	/* select CSR0 */
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	outw (0x0004, ioaddr + LANCE_DATA);	/* STOP the chip */
	lp->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	/* 0x0043: init + start + interrupt-enable; 1 forces ring rebuild. */
	lance_restart (dev, 0x0043, 1);

	dev->trans_start = jiffies;	/* reset the watchdog clock */
	netif_wake_queue (dev);
}
951
952
/*
 * Queue one packet for transmission.  lp->devlock serializes against
 * the interrupt handler.  Frames whose ISA bus address would cross the
 * 16MB limit are copied into a low bounce buffer first.  Always returns
 * NETDEV_TX_OK; the queue is stopped when the ring fills.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in the next Tx ring entry (masked to the ring size). */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order matters -- `base`, which carries the
	 * ownership bit, must be written last. */

	/* The old LANCE chips don't pad buffers to the minimum frame size,
	 * so pad in software when required.  Lengths are stored negative,
	 * per chip convention. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;	/* padding failed; skb already freed */
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	lp->stats.tx_bytes += skb->len;

	/* If any part of the buffer lies above 16MB, copy it into the low
	 * bounce buffer for this slot.  0x83000000 = OWN + start/end of
	 * packet bits in the descriptor's status byte. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		dev_kfree_skb(skb);	/* data copied; skb no longer needed */
	} else {
		lp->tx_skbuff[entry] = skb;	/* freed later by the IRQ handler */
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate transmit demand (CSR0). */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	dev->trans_start = jiffies;

	/* Ring full: stop the queue until the IRQ handler reclaims slots. */
	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
1023
1024
/*
 * The LANCE interrupt handler: service Rx, reclaim completed Tx slots,
 * count errors and restart the chip if it stopped.  The loop is bounded
 * by `boguscnt` to avoid livelock on a screaming interrupt.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	outw(0x00, dev->base_addr + LANCE_ADDR);	/* select CSR0 */
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
		   && --boguscnt >= 0) {
		/* Acknowledge all interrupt sources immediately, keeping the
		 * enable/control bits (~0x004f) untouched. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)		/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;	/* OWN still set: not transmitted yet */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {	/* error summary bit */
					int err_status = lp->tx_ring[entry].misc;
					lp->stats.tx_errors++;
					if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
					if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
					if (err_status & 0x1000) lp->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* FIFO underflow turns the transmitter off;
						 * the chip must be restarted. */
						lp->stats.tx_fifo_errors++;
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)	/* deferred/one-collision bits */
						lp->stats.collisions++;
					lp->stats.tx_packets++;
				}

				/* Free the skb unless the data went via a bounce buffer
				 * (in which case tx_skbuff[entry] is NULL). */
				if (lp->tx_skbuff[entry]) {
					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* If the ring is no longer full, accept more packets. */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000) lp->stats.tx_errors++;
		if (csr0 & 0x1000) lp->stats.rx_errors++;	/* missed frame */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			must_restart = 1;
		}

		if (must_restart) {
			/* Stop the chip to clear the error condition, then restart. */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any remaining interrupt bits and re-enable interrupts. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
1142
/*
 * Receive path: walk the Rx ring while descriptors are host-owned
 * (OWN/sign bit clear), copy each good frame into a fresh skb and pass
 * it up the stack, then hand the descriptor back to the chip.
 */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	while (lp->rx_ring[entry].base >= 0) {	/* we own this entry */
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {	/* not a clean single-buffer packet */
			/* Note from the original driver: a jabber packet can span
			 * two buffers with only the last one flagging the error, so
			 * count a general error only at the end of a packet
			 * (status bit 0x01). */
			if (status & 0x01)
				lp->stats.rx_errors++;
			if (status & 0x20) lp->stats.rx_frame_errors++;
			if (status & 0x10) lp->stats.rx_over_errors++;
			if (status & 0x08) lp->stats.rx_crc_errors++;
			if (status & 0x04) lp->stats.rx_fifo_errors++;
			lp->rx_ring[entry].base &= 0x03ffffff;	/* clear error bits */
		}
		else
		{
			/* Good frame: length excludes the 4-byte FCS. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				lp->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (skb == NULL)
				{
					/* Out of memory: if almost the whole ring is still
					 * chip-owned, drop this frame and move on; otherwise
					 * leave it for a retry on the next interrupt. */
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE -2)
					{
						lp->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* 16-byte align the IP header */
				skb_put(skb,pkt_len);
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes+=pkt_len;
			}
		}
		/* Rewrite buf_length too -- some chip revisions are reported to
		 * clear it -- then return the descriptor to the chip. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	return 0;
}
1219
/*
 * Take the interface down: snapshot the missed-frame counter, stop the
 * chip (it may otherwise keep polling memory), disable the DMA channel,
 * release the IRQ and free all ring buffers.
 */
static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);	/* CSR112: missed-frame count */
		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* CSR0 STOP: halt the chip. */
	outw(0x0004, ioaddr+LANCE_DATA);

	if (dev->dma != 4)	/* 4 = no ISA DMA channel in use */
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}
1254
/*
 * Return the accumulated statistics, refreshing the missed-frame count
 * from CSR112 on chips that provide it.  The register-address pointer
 * (RAP) is saved and restored under the device lock so the interrupt
 * handler's register accesses are not disturbed.
 */
static struct net_device_stats *lance_get_stats(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		short ioaddr = dev->base_addr;
		short saved_addr;
		unsigned long flags;

		spin_lock_irqsave(&lp->devlock, flags);
		saved_addr = inw(ioaddr+LANCE_ADDR);
		outw(112, ioaddr+LANCE_ADDR);	/* CSR112: missed-frame count */
		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
		outw(saved_addr, ioaddr+LANCE_ADDR);
		spin_unlock_irqrestore(&lp->devlock, flags);
	}

	return &lp->stats;
}
1274
1275
1276
1277
/*
 * Set or clear the multicast filter.  The chip is stopped while CSR15
 * (mode) and CSR8-11 (the filter) are rewritten, then restarted.  Exact
 * multicast hashing is not implemented: with any multicast addresses
 * (or IFF_ALLMULTI) the filter is opened fully and upper layers filter.
 */
static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA);	/* temporarily STOP the chip */

	if (dev->flags&IFF_PROMISC) {
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x8000, ioaddr+LANCE_DATA);	/* set promiscuous mode */
	} else {
		short multicast_table[4];
		int i;
		int num_addrs=dev->mc_count;
		if(dev->flags&IFF_ALLMULTI)
			num_addrs=1;
		/* All-ones filter when any multicast is wanted, all-zeroes otherwise. */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x0000, ioaddr+LANCE_DATA);	/* clear promiscuous mode */
	}

	/* Resume normal operation (start + interrupt enable). */
	lance_restart(dev, 0x0142, 0);

}
1307
1308