/*
 * ni6510 (am7990 "lance" chip) ISA ethernet driver
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"
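/*
 * compile-time options: XMT_VIA_SKB/RCV_VIA_SKB transmit/receive
 * directly from/into socket buffers when they happen to lie below the
 * 16MB ISA DMA limit; with both undefined the driver always copies
 * through bounce buffers. RCV_PARANOIA_CHECK adds a consistency check
 * of the receive ring to the interrupt handler.
 */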
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else
 static int isa0=4,isa1=4,csr80=0x0017;
#endif
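/*
 * bus-timing tuning values: isa0/isa1 go to the board's bus-interface
 * port (L_BUSIF) and csr80 to the chip's CSR80 in
 * ni65_set_performance(); their exact meaning is board/chip specific,
 * the three profiles are just more or less aggressive settings
 */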
#define NI65_ID0 0x00
#define NI65_ID1 0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0 0x57
#define NE2100_ID1 0x57

#define PORT p->cmdr_addr
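/*
 * ring sizes; the mask is OR'ed into the ring base address in the init
 * block, where the LANCE expects log2(ring length) in the top three
 * bits: 16 descriptors <-> 4<<29 = 0x80000000, 8 <-> 3<<29 = 0x60000000,
 * 4 <-> 2<<29 = 0x40000000 .. both values must be kept in sync by hand
 */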
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000
#endif

#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544
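/*
 * port layout of the boards, relative to the command base address:
 * the chip's data (RDP) and address (RAP) registers plus board-level
 * reset, configuration and bus-interface ports
 */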
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET 0x04
#define L_CONFIG 0x05
#define L_BUSIF 0x06
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2
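/*
 * register access is indirect: select a CSR by writing its number to
 * the address (RAP) port, then read/write the data (RDP) port, e.g.
 * readreg(CSR0). The #if 0 variant below adds a dummy inw() after
 * each access, presumably as a settle delay for slow boards.
 */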
#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
			   outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
		      inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif

static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
	unsigned char id0,id1;
	short id_offset;
	short total_size;
	short cmd_offset;
	short addr_offset;
	unsigned char *vendor_id;
	char *cardname;
	unsigned long config;
} cards[] = {
	{
		.id0 = NI65_ID0,
		.id1 = NI65_ID1,
		.id_offset = 0x0e,
		.total_size = 0x10,
		.cmd_offset = 0x0,
		.addr_offset = 0x8,
		.vendor_id = ni_vendor,
		.cardname = "ni6510",
		.config = 0x1,
	},
	{
		.id0 = NI65_EB_ID0,
		.id1 = NI65_EB_ID1,
		.id_offset = 0x0e,
		.total_size = 0x18,
		.cmd_offset = 0x10,
		.addr_offset = 0x0,
		.vendor_id = ni_vendor,
		.cardname = "ni6510 EtherBlaster",
		.config = 0x2,
	},
	{
		.id0 = NE2100_ID0,
		.id1 = NE2100_ID1,
		.id_offset = 0x0e,
		.total_size = 0x18,
		.cmd_offset = 0x10,
		.addr_offset = 0x0,
		.vendor_id = NULL,
		.cardname = "generic NE2100",
		.config = 0x0,
	},
};
#define NUM_CARDS ARRAY_SIZE(cards)
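/*
 * per-device private data; note that the descriptor rings and the init
 * block are embedded here, so the whole structure must live in
 * DMA-able memory below the 16MB ISA limit (see ni65_alloc_mem())
 */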
struct priv
{
	struct rmd rmdhead[RMDNUM];
	struct tmd tmdhead[TMDNUM];
	struct init_block ib;
	int rmdnum;
	int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
	struct sk_buff *recv_skb[RMDNUM];
#else
	void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
	struct sk_buff *tmd_skb[TMDNUM];
#endif
	void *tmdbounce[TMDNUM];
	int tmdbouncenum;
	int lock,xmit_queued;

	void *self;
	int cmdr_addr;
	int cardno;
	int features;
	spinlock_t ring_lock;
};

static int ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
				    struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static void set_multicast_list(struct net_device *dev);

static int irqtab[] __initdata = { 9,12,15,5 };
static int dmatab[] __initdata = { 0,3,5,6,7 };

static int debuglevel = 1;
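/*
 * set the chip's 'performance' registers .. the LANCE must be STOPped
 * before the bus-interface ports and CSR80 may be written
 */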
static void ni65_set_performance(struct priv *p)
{
	writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP everything */

	if( !(cards[p->cardno].config & 0x02) )
		return;

	outw(80,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) != 80) /* chip doesn't implement CSR80 */
		return;

	writereg( (csr80 & 0x3fff) ,80); /* DMA transfer/FIFO thresholds */
	outw(0,PORT+L_ADDRREG);
	outw((short)isa0,PORT+L_BUSIF); /* bus-interface register 0 */
	outw(1,PORT+L_ADDRREG);
	outw((short)isa1,PORT+L_BUSIF); /* bus-interface register 1 */

	outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
}
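/*
 * open interface (up): grab the IRQ and (re)init the chip
 */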
static int ni65_open(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	int irqval = request_irq(dev->irq, ni65_interrupt,0,
				 cards[p->cardno].cardname,dev);
	if (irqval) {
		printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
		       dev->name,dev->irq, irqval);
		return -EAGAIN;
	}

	if(ni65_lance_reinit(dev))
	{
		netif_start_queue(dev);
		return 0;
	}
	else
	{
		free_irq(dev->irq,dev);
		return -EAGAIN;
	}
}
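/*
 * close interface (down): reset the chip, drop pending transmit skbs
 * and release the IRQ
 */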
static int ni65_close(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* touching the reset port resets the chip */

#ifdef XMT_VIA_SKB
	{
		int i;
		for(i=0;i<TMDNUM;i++)
		{
			if(p->tmd_skb[i]) {
				dev_kfree_skb(p->tmd_skb[i]);
				p->tmd_skb[i] = NULL;
			}
		}
	}
#endif
	free_irq(dev->irq,dev);
	return 0;
}

static void cleanup_card(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	disable_dma(dev->dma);
	free_dma(dev->dma);
	release_region(dev->base_addr, cards[p->cardno].total_size);
	ni65_free_buffer(p);
}

static int irq;
static int io;
static int dma;
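/*
 * probe for the card (not the chip itself): try a user-supplied base
 * address first, otherwise walk the list of well-known I/O ports
 */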
struct net_device * __init ni65_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
	const int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
		irq = dev->irq;
		dma = dev->dma;
	} else {
		dev->base_addr = io;
	}

	if (dev->base_addr > 0x1ff) { /* check a single specified location */
		err = ni65_probe1(dev, dev->base_addr);
	} else if (dev->base_addr > 0) { /* don't probe at all */
		err = -ENXIO;
	} else {
		for (port = ports; *port && ni65_probe1(dev, *port); port++)
			;
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	cleanup_card(dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static const struct net_device_ops ni65_netdev_ops = {
	.ndo_open		= ni65_open,
	.ndo_stop		= ni65_close,
	.ndo_start_xmit		= ni65_send_packet,
	.ndo_tx_timeout		= ni65_timeout,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
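/*
 * the real card probe: match the ID bytes and the vendor prefix at
 * ioaddr, read the MAC address and sort out IRQ/DMA settings
 */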
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
	int i,j;
	struct priv *p;
	unsigned long flags;

	dev->irq = irq;
	dev->dma = dma;

	for(i=0;i<NUM_CARDS;i++) {
		if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
			continue;
		if(cards[i].id_offset >= 0) {
			if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
			   inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
				release_region(ioaddr, cards[i].total_size);
				continue;
			}
		}
		if(cards[i].vendor_id) {
			int mismatch = 0;
			for(j=0;j<3;j++)
				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
					mismatch = 1;
			/* the whole vendor prefix must match, otherwise
			 * release the region once and try the next card type */
			if(mismatch) {
				release_region(ioaddr, cards[i].total_size);
				continue;
			}
		}
		break;
	}
	if(i == NUM_CARDS)
		return -ENODEV;

	for(j=0;j<6;j++)
		dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

	if( (j=ni65_alloc_buffer(dev)) < 0) {
		release_region(ioaddr, cards[i].total_size);
		return j;
	}
	p = dev->ml_priv;
	p->cmdr_addr = ioaddr + cards[i].cmd_offset;
	p->cardno = i;
	spin_lock_init(&p->ring_lock);

	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* reset the board */
	if( (j=readreg(CSR0)) != 0x4) { /* CSR0 must read STOP (0x4) after reset */
		printk("failed.\n");
		printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	/*
	 * the original Am7990 only implements CSR0-3; if the RAP latches 88,
	 * this is presumably a newer chip with a version ID in CSR88/89
	 */
	outw(88,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) == 88) {
		unsigned long v;
		v = inw(PORT+L_DATAREG);
		v <<= 16;
		outw(89,PORT+L_ADDRREG);
		v |= inw(PORT+L_DATAREG);
		printk("Version %#08lx, ",v);
		p->features = INIT_RING_BEFORE_START;
	}
	else {
		printk("ancient LANCE, ");
		p->features = 0x0;
	}

	if(test_bit(0,&cards[i].config)) {
		dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
		dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
		printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
	}
	else {
		if(dev->dma == 0) {
			/* skip channels that already show DMA activity;
			 * reusing i here is ok, the card index is saved in p->cardno */
			unsigned long dma_channels =
				((inb(DMA1_STAT_REG) >> 4) & 0x0f)
				| (inb(DMA2_STAT_REG) & 0xf0);
			for(i=1;i<5;i++) {
				int dma = dmatab[i];
				if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
					continue;

				flags=claim_dma_lock();
				disable_dma(dma);
				set_dma_mode(dma,DMA_MODE_CASCADE);
				enable_dma(dma);
				release_dma_lock(flags);

				ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

				flags=claim_dma_lock();
				disable_dma(dma);
				free_dma(dma);
				release_dma_lock(flags);

				if(readreg(CSR0) & CSR0_IDON)
					break;
			}
			if(i == 5) {
				printk("failed.\n");
				printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			dev->dma = dmatab[i];
			printk("DMA %d (autodetected), ",dev->dma);
		}
		else
			printk("DMA %d (assigned), ",dev->dma);

		if(dev->irq < 2)
		{
			unsigned long irq_mask;

			ni65_init_lance(p,dev->dev_addr,0,0);
			irq_mask = probe_irq_on();
			writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
			msleep(20);
			dev->irq = probe_irq_off(irq_mask);
			if(!dev->irq)
			{
				printk("Failed to detect IRQ line!\n");
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			printk("IRQ %d (autodetected).\n",dev->irq);
		}
		else
			printk("IRQ %d (assigned).\n",dev->irq);
	}

	if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
	{
		printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	dev->base_addr = ioaddr;
	dev->netdev_ops = &ni65_netdev_ops;
	dev->watchdog_timeo = HZ/2;

	return 0;
}
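/*
 * bring the lance chip up: load MAC address, multicast filter and ring
 * pointers into the init block, point CSR1/CSR2 at the block, then
 * issue CSR0_INIT and busy-wait for IDON (or MERR)
 */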
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
	int i;
	u32 pib;

	writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

	for(i=0;i<6;i++)
		p->ib.eaddr[i] = daddr[i];

	for(i=0;i<8;i++)
		p->ib.filter[i] = filter;
	p->ib.mode = mode;

	p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
	p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
	writereg(0,CSR3);
	pib = (u32) isa_virt_to_bus(&p->ib);
	writereg(pib & 0xffff,CSR1);
	writereg(pib >> 16,CSR2);

	writereg(CSR0_INIT,CSR0);

	for(i=0;i<32;i++)
	{
		mdelay(4);
		if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
			break;
	}
}
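/*
 * allocate a buffer and make sure it is DMA-able for ISA, i.e. below
 * the 16MB boundary; type != 0 allocates an skb, else plain kmalloc
 */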
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
	struct sk_buff *skb=NULL;
	unsigned char *ptr;
	void *ret;

	if(type) {
		ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
		if(!skb) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
		skb_reserve(skb,2+16);
		skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
		ptr = skb->data;
	}
	else {
		ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
		if(!ret)
			return NULL;
	}
	if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
		printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
		if(type)
			kfree_skb(skb);
		else
			kfree(ptr);
		return NULL;
	}
	return ret;
}
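/*
 * allocate all driver buffers: the priv struct itself (which embeds
 * the descriptor rings), the transmit bounce buffers and the receive
 * buffers/skbs
 */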
static int ni65_alloc_buffer(struct net_device *dev)
{
	unsigned char *ptr;
	struct priv *p;
	int i;

	/*
	 * we need 8-byte aligned memory ..
	 */
	ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
	if(!ptr)
		return -ENOMEM;

	p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
	memset((char *)p, 0, sizeof(struct priv));
	p->self = ptr;

	for(i=0;i<TMDNUM;i++)
	{
#ifdef XMT_VIA_SKB
		p->tmd_skb[i] = NULL;
#endif
		p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
		if(!p->tmdbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
		if(!p->recv_skb[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#else
		p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
		if(!p->recvbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#endif
	}

	return 0;
}
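/*
 * release all allocated buffers and the priv struct
 */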
static void ni65_free_buffer(struct priv *p)
{
	int i;

	if(!p)
		return;

	for(i=0;i<TMDNUM;i++) {
		kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i])
			dev_kfree_skb(p->tmd_skb[i]);
#endif
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		if(p->recv_skb[i])
			dev_kfree_skb(p->recv_skb[i]);
#else
		kfree(p->recvbounce[i]);
#endif
	}
	kfree(p->self);
}
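/*
 * stop and (re)start the lance, e.g. after an error; chips with the
 * INIT_RING_BEFORE_START feature get their rings rebuilt and pending
 * transmits re-issued
 */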
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
	int csr0 = CSR0_INEA;

	writedatareg(CSR0_STOP);

	if(debuglevel > 1)
		printk(KERN_DEBUG "ni65_stop_start\n");

	if(p->features & INIT_RING_BEFORE_START) {
		int i;
#ifdef XMT_VIA_SKB
		struct sk_buff *skb_save[TMDNUM];
#endif
		unsigned long buffer[TMDNUM];
		short blen[TMDNUM];

		if(p->xmit_queued) {
			while(1) {
				if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
					break;
				p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
				if(p->tmdlast == p->tmdnum)
					break;
			}
		}

		for(i=0;i<TMDNUM;i++) {
			struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
			skb_save[i] = p->tmd_skb[i];
#endif
			buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
			blen[i] = tmdp->blen;
			tmdp->u.s.status = 0x0;
		}

		for(i=0;i<RMDNUM;i++) {
			struct rmd *rmdp = p->rmdhead + i;
			rmdp->u.s.status = RCV_OWN;
		}
		p->tmdnum = p->xmit_queued = 0;
		writedatareg(CSR0_STRT | csr0);

		for(i=0;i<TMDNUM;i++) {
			int num = (i + p->tmdlast) & (TMDNUM-1);
			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]);
			p->tmdhead[i].blen = blen[num];
			if(p->tmdhead[i].u.s.status & XMIT_OWN) {
				p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
				p->xmit_queued = 1;
				writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
			}
#ifdef XMT_VIA_SKB
			p->tmd_skb[i] = skb_save[num];
#endif
		}
		p->rmdnum = p->tmdlast = 0;
		if(!p->lock)
			if (p->tmdnum || !p->xmit_queued)
				netif_wake_queue(dev);
		netif_trans_update(dev);
	}
	else
		writedatareg(CSR0_STRT | csr0);
}
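/*
 * full (re)initialisation of the chip: reset, put the DMA channel into
 * cascade mode, rebuild both rings and run the init block;
 * returns 1 on success, 0 on failure
 */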
static int ni65_lance_reinit(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;
	unsigned long flags;

	p->lock = 0;
	p->xmit_queued = 0;

	flags=claim_dma_lock();
	disable_dma(dev->dma);
	set_dma_mode(dev->dma,DMA_MODE_CASCADE);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	outw(inw(PORT+L_RESET),PORT+L_RESET);
	if( (i=readreg(CSR0) ) != 0x4)
	{
		printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
		       cards[p->cardno].cardname,(int) i);
		flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
		return 0;
	}

	p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
	for(i=0;i<TMDNUM;i++)
	{
		struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i]) {
			dev_kfree_skb(p->tmd_skb[i]);
			p->tmd_skb[i] = NULL;
		}
#endif
		tmdp->u.buffer = 0x0;
		tmdp->u.s.status = XMIT_START | XMIT_END;
		tmdp->blen = tmdp->status2 = 0;
	}

	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
		rmdp->blen = -(R_BUF_SIZE-8); /* the chip wants a negative buffer length */
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* hand the descriptor to the chip */
	}

	if(dev->flags & IFF_PROMISC)
		ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
	else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
		ni65_init_lance(p,dev->dev_addr,0xff,0x0); /* all-ones filter: accept all multicast */
	else
		ni65_init_lance(p,dev->dev_addr,0x00,0x00);

	/*
	 * performance tuning is optional; the check below only cares
	 * whether initialisation (IDON) completed
	 */
	if(inw(PORT+L_DATAREG) & CSR0_IDON) {
		ni65_set_performance(p);
		/* init OK: start the chip and enable interrupts */
		writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
		return 1;
	}
	printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
	flags=claim_dma_lock();
	disable_dma(dev->dma);
	release_dma_lock(flags);
	return 0;
}
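/*
 * interrupt handler: acknowledge the events in CSR0 and dispatch
 * receive/transmit/error handling; loops (bounded by bcnt) while any
 * event stays asserted
 */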
static irqreturn_t ni65_interrupt(int irq, void * dev_id)
{
	int csr0 = 0;
	struct net_device *dev = dev_id;
	struct priv *p;
	int bcnt = 32;

	p = dev->ml_priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

#if 0
		writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
		writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

		if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
			break;

		if(csr0 & CSR0_RINT) /* receive interrupt */
			ni65_recv_intr(dev,csr0);
		if(csr0 & CSR0_TINT) /* transmit interrupt */
			ni65_xmit_intr(dev,csr0);

		if(csr0 & CSR0_ERR)
		{
			if(debuglevel > 1)
				printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
			if(csr0 & CSR0_BABL)
				dev->stats.tx_errors++;
			if(csr0 & CSR0_MISS) {
				int i;
				for(i=0;i<RMDNUM;i++)
					printk("%02x ",p->rmdhead[i].u.s.status);
				printk("\n");
				dev->stats.rx_errors++;
			}
			if(csr0 & CSR0_MERR) {
				if(debuglevel > 1)
					printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
				ni65_stop_start(dev,p);
			}
		}
	}

#ifdef RCV_PARANOIA_CHECK
{
	/* make sure driver and chip agree on the next receive descriptor;
	 * resynchronise p->rmdnum if the ring got out of step */
	int j;
	for(j=0;j<RMDNUM;j++)
	{
		int i, num2;
		for(i=RMDNUM-1;i>0;i--) {
			num2 = (p->rmdnum + i) & (RMDNUM-1);
			if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
				break;
		}

		if(i) {
			int k, num1;
			for(k=0;k<RMDNUM;k++) {
				num1 = (p->rmdnum + k) & (RMDNUM-1);
				if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
					break;
			}
			if(!k)
				break;

			if(debuglevel > 0)
			{
				char buf[256],*buf1;
				buf1 = buf;
				for(k=0;k<RMDNUM;k++) {
					sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status));
					buf1 += 3;
				}
				*buf1 = 0;
				printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
			}

			p->rmdnum = num1;
			ni65_recv_intr(dev,csr0);
			if((p->rmdhead[num2].u.s.status & RCV_OWN))
				break; /* ok, we are 'in sync' again */
		}
		else
			break;
	}
}
#endif

	if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
		printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
		ni65_stop_start(dev,p);
	}
	else
		writedatareg(CSR0_INEA);

	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}
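/*
 * transmit interrupt: walk the ring from tmdlast, account statistics
 * and errors, free transmitted skbs and possibly restart the chip
 */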
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
	struct priv *p = dev->ml_priv;

	while(p->xmit_queued)
	{
		struct tmd *tmdp = p->tmdhead + p->tmdlast;
		int tmdstat = tmdp->u.s.status;

		if(tmdstat & XMIT_OWN)
			break;

		if(tmdstat & XMIT_ERR)
		{
#if 0
			if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
				printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
			if(tmdp->status2 & XMIT_RTRY)
				dev->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)
				dev->stats.tx_carrier_errors++;
			if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
				/* this stops the xmitter */
				dev->stats.tx_fifo_errors++;
				if(debuglevel > 0)
					printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
				if(p->features & INIT_RING_BEFORE_START) {
					tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* resend this frame */
					ni65_stop_start(dev,p);
					break; /* no more Xmit processing .. */
				}
				else
					ni65_stop_start(dev,p);
			}
			if(debuglevel > 2)
				printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
			if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
				dev->stats.tx_errors++;
			tmdp->status2 = 0;
		}
		else {
			dev->stats.tx_bytes -= (short)(tmdp->blen); /* blen is stored negative */
			dev->stats.tx_packets++;
		}

#ifdef XMT_VIA_SKB
		if(p->tmd_skb[p->tmdlast]) {
			dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
			p->tmd_skb[p->tmdlast] = NULL;
		}
#endif

		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)
			p->xmit_queued = 0;
	}
	netif_wake_queue(dev);
}
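/*
 * receive interrupt: pass each good frame up in a freshly allocated
 * skb (or swap ring buffers with RCV_VIA_SKB), then hand the
 * descriptor back to the chip
 */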
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
	struct rmd *rmdp;
	int rmdstat,len;
	int cnt=0;
	struct priv *p = dev->ml_priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
	{
		cnt++;
		if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or frame spans descriptors */
		{
			if(!(rmdstat & RCV_ERR)) {
				if(rmdstat & RCV_START)
				{
					dev->stats.rx_length_errors++;
					printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
				}
			}
			else {
				if(debuglevel > 2)
					printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
					       dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
				if(rmdstat & RCV_FRAM)
					dev->stats.rx_frame_errors++;
				if(rmdstat & RCV_OFLO)
					dev->stats.rx_over_errors++;
				if(rmdstat & RCV_CRC)
					dev->stats.rx_crc_errors++;
				if(rmdstat & RCV_BUF_ERR)
					dev->stats.rx_fifo_errors++;
			}
			if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
				dev->stats.rx_errors++;
		}
		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) /* strip FCS; 60 = min. frame length */
		{
#ifdef RCV_VIA_SKB
			struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
			if (skb)
				skb_reserve(skb,16);
#else
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
#endif
			if(skb)
			{
				skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
				if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
					/* the new skb isn't DMA-able below 16MB:
					 * keep the old buffer and copy the frame */
					skb_put(skb,len);
					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
				}
				else {
					/* swap buffers: give the new skb to the chip,
					 * pass the filled one up the stack */
					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
					skb_put(skb,R_BUF_SIZE);
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
					skb = skb1;
					skb_trim(skb,len);
				}
#else
				skb_put(skb,len);
				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
			else
			{
				printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
				dev->stats.rx_dropped++;
			}
		}
		else {
			printk(KERN_INFO "%s: received runt packet\n",dev->name);
			dev->stats.rx_errors++;
		}
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* change owner */
		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
		rmdp = p->rmdhead + p->rmdnum;
	}
}
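/*
 * transmit watchdog: dump the ring status and kick the xmitter by
 * reinitialising the chip
 */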
static void ni65_timeout(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;

	printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
	for(i=0;i<TMDNUM;i++)
		printk("%02x ",p->tmdhead[i].u.s.status);
	printk("\n");
	ni65_lance_reinit(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}
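/*
 * queue a packet for transmission: copy it into a bounce buffer (or,
 * with XMT_VIA_SKB, hand the skb to the chip directly when it lies
 * below 16MB) and give the descriptor to the LANCE via XMIT_OWN
 */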
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	{
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		struct tmd *tmdp;
		unsigned long flags;

#ifdef XMT_VIA_SKB
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif
			/* copy (and pad) the data into a DMA-able bounce buffer */
			skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
						  skb->len > T_BUF_SIZE ? T_BUF_SIZE :
								skb->len);
			if (len > skb->len)
				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
			dev_kfree_skb (skb);

			spin_lock_irqsave(&p->ring_lock, flags);
			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
		}
		else {
			spin_lock_irqsave(&p->ring_lock, flags);

			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
			p->tmd_skb[p->tmdnum] = skb;
		}
#endif
		tmdp->blen = -len; /* negative length, as the chip expects */

		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
		writedatareg(CSR0_TDMD | CSR0_INEA); /* demand transmission */

		p->xmit_queued = 1;
		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

		if(p->tmdnum != p->tmdlast)
			netif_wake_queue(dev);

		p->lock = 0;

		spin_unlock_irqrestore(&p->ring_lock, flags);
	}

	return NETDEV_TX_OK;
}

static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}

#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");

int __init init_module(void)
{
	dev_ni65 = ni65_probe(-1);
	return PTR_RET(dev_ni65);
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_ni65);
	cleanup_card(dev_ni65);
	free_netdev(dev_ni65);
}
#endif

MODULE_LICENSE("GPL");