1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
/* Driver tunables (module parameters in spirit; file-scope statics here). */
static int debuglevel;		/* >0: trace rx/tx paths, >1: trace irq handler too */
static int automatic_resume;	/* !=0: issue RUC_RESUME after every rx interrupt */
static int rfdadd;		/* extra RFDs appended to the receive ring in alloc_rfa() */
static int fifo = 0x8;		/* FIFO limit byte programmed into the configure command */
106
107#include <linux/module.h>
108#include <linux/kernel.h>
109#include <linux/string.h>
110#include <linux/errno.h>
111#include <linux/ioport.h>
112#include <linux/slab.h>
113#include <linux/interrupt.h>
114#include <linux/delay.h>
115#include <linux/init.h>
116#include <linux/bitops.h>
117#include <asm/io.h>
118
119#include <linux/netdevice.h>
120#include <linux/etherdevice.h>
121#include <linux/skbuff.h>
122
123#include "ni52.h"
124
#define DRV_NAME "ni52"

/* DEBUG enables the extra printk diagnostics in ni52_timeout() and
 * ni52_send_packet() */
#define DEBUG
/* value written into (and read back from) the SCP sysbus byte */
#define SYSBUSVAL 1

/* One-byte strobes to the board's I/O ports.  All of these expect a
 * 'dev' (struct net_device *) in the enclosing scope. */
#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }	/* channel attention */
#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }		/* put chip into reset */
#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }		/* mask board interrupt */
#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }		/* unmask board interrupt */

/* Translations between CPU iomem pointers and the 82586's 16/24-bit
 * shared-memory offsets.  All expect a 'p' (struct priv *) in scope.
 * NOTE(review): make32() sign-extends the 16-bit offset via the (short)
 * cast, i.e. offsets are taken relative to memtop downwards. */
#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
					- p->memtop))
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/* Sizes/counts of the structures laid out in the shared-memory window. */
#define RECV_BUFF_SIZE 1524	/* per receive buffer (max. ethernet frame + slack) */
#define XMIT_BUFF_SIZE 1524	/* per transmit buffer */
#define NUM_XMIT_BUFFS 1	/* only the single-buffer configuration is supported */
#define NUM_RECV_BUFFS_8 4	/* receive ring size on an 8K (0x2000) board */
#define NUM_RECV_BUFFS_16 9	/* receive ring size on a 16K (0x4000) board */
#define NO_NOPCOMMANDS		/* drive the CU per frame instead of via a NOP loop */

/* Probe constants. */
#define NI52_TOTAL_SIZE 16	/* size of the I/O port region */
#define NI52_ADDR0 0x02		/* expected first three bytes of the board's */
#define NI52_ADDR1 0x07		/* station address, read from the I/O ports */
#define NI52_ADDR2 0x01		/* during probe */
168
/* net_device entry points */
static int ni52_probe1(struct net_device *dev, int ioaddr);
static irqreturn_t ni52_interrupt(int irq, void *dev_id);
static int ni52_open(struct net_device *dev);
static int ni52_close(struct net_device *dev);
static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *);
static struct net_device_stats *ni52_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void ni52_timeout(struct net_device *dev);

/* i82586 helpers */
static int init586(struct net_device *dev);
static int check586(struct net_device *dev, unsigned size);
static void alloc586(struct net_device *dev);
static void startrecv586(struct net_device *dev);
static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
static void ni52_rcv_int(struct net_device *dev);
static void ni52_xmt_int(struct net_device *dev);
static void ni52_rnr_int(struct net_device *dev);
187
/* Per-device private state, lives in netdev_priv(dev). */
struct priv {
	struct net_device_stats stats;
	char __iomem *base;	/* mapped window minus its 24-bit bus base (see make24) */
	char __iomem *mapped;	/* ioremap()ed shared-memory window */
	char __iomem *memtop;	/* one past the end of the mapped window */
	spinlock_t spinlock;	/* serialises irq handler vs. xmit bookkeeping */
	int reset;		/* set once a wait_for_scb_cmd*() timeout reset the chip */
	/* receive frame descriptor ring: first entry, current head, ring tail */
	struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
	struct scp_struct __iomem *scp;		/* system configuration pointer */
	struct iscp_struct __iomem *iscp;	/* intermediate SCP */
	struct scb_struct __iomem *scb;		/* system control block */
	struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];	/* tx buffer descriptors */
#if (NUM_XMIT_BUFFS == 1)
	/* single-buffer mode still needs two command slots for ping-pong */
	struct transmit_cmd_struct __iomem *xmit_cmds[2];
	struct nop_cmd_struct __iomem *nop_cmds[2];
#else
	struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
	struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
#endif
	int nop_point, num_recv_buffs;
	char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];	/* raw tx data areas */
	int xmit_count, xmit_last;	/* next free / oldest in-flight tx slot */
};
211
212
/*
 * Busy-wait until the 82586 has accepted the previous CU command
 * (scb->cmd_cuc reads back as zero).  Polls up to 16384 times with a
 * 4us pause each; on the final iteration without success the chip is
 * reported dead, put into reset and p->reset is set so later timeouts
 * don't reset it again.
 */
static void wait_for_scb_cmd(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);
	int i;
	for (i = 0; i < 16384; i++) {
		if (readb(&p->scb->cmd_cuc) == 0)
			break;
		udelay(4);
		if (i == 16383) {
			printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
				dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
			if (!p->reset) {
				p->reset = 1;
				ni_reset586();
			}
		}
	}
}
231
/*
 * Same as wait_for_scb_cmd() but for the receive-unit command field
 * (scb->cmd_ruc): poll until the chip accepted the RU command, with a
 * one-shot reset on timeout.
 */
static void wait_for_scb_cmd_ruc(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);
	int i;
	for (i = 0; i < 16384; i++) {
		if (readb(&p->scb->cmd_ruc) == 0)
			break;
		udelay(4);
		if (i == 16383) {
			printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
				dev->name, readb(&p->scb->cmd_ruc),
				readb(&p->scb->rus));
			if (!p->reset) {
				p->reset = 1;
				ni_reset586();
			}
		}
	}
}
251
252static void wait_for_stat_compl(void __iomem *p)
253{
254 struct nop_cmd_struct __iomem *addr = p;
255 int i;
256 for (i = 0; i < 32767; i++) {
257 if (readw(&((addr)->cmd_status)) & STAT_COMPL)
258 break;
259 udelay(32);
260 }
261}
262
263
264
265
/*
 * ndo_stop: release the interrupt line, put the 82586 back into reset
 * and stop the transmit queue.  Always returns 0.
 */
static int ni52_close(struct net_device *dev)
{
	free_irq(dev->irq, dev);
	ni_reset586();	/* the hard way to stop the receiver */
	netif_stop_queue(dev);
	return 0;
}
273
274
275
276
/*
 * ndo_open: bring the chip up -- rebuild all shared-memory structures,
 * start the receive unit, unmask the board interrupt and grab the IRQ.
 * NOTE(review): the board interrupt is enabled before request_irq();
 * verify no interrupt can fire unhandled in that window.
 */
static int ni52_open(struct net_device *dev)
{
	int ret;

	ni_disint();
	alloc586(dev);
	init586(dev);
	startrecv586(dev);
	ni_enaint();

	ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev);
	if (ret) {
		ni_reset586();
		return ret;
	}
	netif_start_queue(dev);
	return 0;	/* most done .. */
}
295
/*
 * Probe helper: place a zeroed ISCP at 'addr', point the SCP at it,
 * set its busy flag and reset/attention the chip.  A live i82586
 * clears the busy byte during initialisation.  Returns 1 if the chip
 * answered, 0 otherwise.
 */
static int check_iscp(struct net_device *dev, void __iomem *addr)
{
	struct iscp_struct __iomem *iscp = addr;
	struct priv *p = netdev_priv(dev);
	memset_io(iscp, 0, sizeof(struct iscp_struct));

	writel(make24(iscp), &p->scp->iscp);
	writeb(1, &iscp->busy);

	ni_reset586();
	ni_attn586();
	mdelay(32);	/* wait a while... */

	if (readb(&iscp->busy))	/* i82586 clears 'busy' during init */
		return 0;
	return 1;
}
313
314
315
316
/*
 * Map the shared-memory window of 'size' bytes and verify an i82586
 * responds there: lay out the base/memtop/scp/scb/iscp pointers, check
 * the SCP area reads back as zero after clearing, check the sysbus
 * byte is writable, then run the ISCP handshake at two locations.
 * Returns 1 on success (window stays mapped), 0 on failure (unmapped).
 */
static int check586(struct net_device *dev, unsigned size)
{
	struct priv *p = netdev_priv(dev);
	int i;

	p->mapped = ioremap(dev->mem_start, size);
	if (!p->mapped)
		return 0;

	/* the 82586 sees the window as the top of its 24-bit space */
	p->base = p->mapped + size - 0x01000000;
	p->memtop = p->mapped + size;
	p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
	p->scb = (struct scb_struct __iomem *) p->mapped;
	p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
	memset_io(p->scp, 0, sizeof(struct scp_struct));
	for (i = 0; i < sizeof(struct scp_struct); i++)
		/* memory was cleared above; nonzero read-back means no RAM here */
		if (readb((char __iomem *)p->scp + i))
			goto Enodev;
	writeb(SYSBUSVAL, &p->scp->sysbus);	/* 1 = 8-bit sysbus value */
	if (readb(&p->scp->sysbus) != SYSBUSVAL)
		goto Enodev;

	if (!check_iscp(dev, p->mapped))
		goto Enodev;
	if (!check_iscp(dev, p->iscp))
		goto Enodev;
	return 1;
Enodev:
	iounmap(p->mapped);
	return 0;
}
349
350
351
352
/*
 * (Re)initialise the chip's bootstrap structures: clear ISCP/SCP, link
 * SCP -> ISCP -> SCB, then reset + channel-attention the chip and wait
 * for it to clear the ISCP busy flag.  Also clears p->reset so the
 * timeout helpers may reset the chip again if needed.
 */
static void alloc586(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	ni_reset586();
	mdelay(32);

	memset_io(p->iscp, 0, sizeof(struct iscp_struct));
	memset_io(p->scp , 0, sizeof(struct scp_struct));

	writel(make24(p->iscp), &p->scp->iscp);
	writeb(SYSBUSVAL, &p->scp->sysbus);
	writew(make16(p->scb), &p->iscp->scb_offset);

	writeb(1, &p->iscp->busy);	/* chip clears this when init is done */
	ni_reset586();
	ni_attn586();

	mdelay(32);

	if (readb(&p->iscp->busy))
		printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);

	p->reset = 0;

	memset_io(p->scb, 0, sizeof(struct scb_struct));
}
380
381
/* Defaults for the probe/module parameters. */
static int irq = 9;
static int io = 0x300;
static long memstart;	/* shared-memory start, e.g. 0xd0000 */
static long memend;	/* shared-memory end, e.g. 0xd4000 */
386
387
388
389
/*
 * Locate and register an NI5210 board.  'unit' >= 0 selects a specific
 * ethN with boot-time parameters; otherwise the file-scope io/irq/mem
 * settings are used and, when io == 0, a list of common ports is
 * probed.  Returns the registered net_device or an ERR_PTR().
 */
struct net_device * __init ni52_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct priv));
	static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
	struct priv *p;
	int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	p = netdev_priv(dev);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
		io = dev->base_addr;
		irq = dev->irq;
		memstart = dev->mem_start;
		memend = dev->mem_end;
	}

	if (io > 0x1ff) {	/* check a single, user-specified location */
		err = ni52_probe1(dev, io);
	} else if (io > 0) {	/* don't probe at all */
		err = -ENXIO;
	} else {
		/* ni52_probe1() returns 0 on success, so this walks the
		 * port list until a probe succeeds or the list ends */
		for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
			;
		if (*port)
			goto got_it;
#ifdef FULL_IO_PROBE
		for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
			;
		if (io < 0x400)
			goto got_it;
#endif
		err = -ENODEV;
	}
	if (err)
		goto out;
got_it:
	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	iounmap(p->mapped);
	release_region(dev->base_addr, NI52_TOTAL_SIZE);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
443
/* net_device callbacks; MTU/MAC/address checks use the generic eth helpers */
static const struct net_device_ops ni52_netdev_ops = {
	.ndo_open		= ni52_open,
	.ndo_stop		= ni52_close,
	.ndo_get_stats		= ni52_get_stats,
	.ndo_tx_timeout		= ni52_timeout,
	.ndo_start_xmit		= ni52_send_packet,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
455
/*
 * Probe a single I/O address for an NI5210: check the magic bytes and
 * the station-address prefix, find (or verify) the shared-memory
 * window, size the receive ring and detect the IRQ if necessary.
 * Returns 0 and leaves the region/window claimed on success, or a
 * negative errno with the I/O region released.
 */
static int __init ni52_probe1(struct net_device *dev, int ioaddr)
{
	int i, size, retval;
	struct priv *priv = netdev_priv(dev);

	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->mem_start = memstart;
	dev->mem_end = memend;

	spin_lock_init(&priv->spinlock);

	if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
		return -EBUSY;

	/* board signature check */
	if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
	    !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
		retval = -ENODEV;
		goto out;
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = inb(dev->base_addr+i);

	/* station address must start with the NI5210 prefix */
	if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
		 || dev->dev_addr[2] != NI52_ADDR2) {
		retval = -ENODEV;
		goto out;
	}

	printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
		dev->name, dev->base_addr);

#ifdef MODULE
	/* as a module the memory window must be fully specified */
	size = dev->mem_end - dev->mem_start;
	if (size != 0x2000 && size != 0x4000) {
		printk("\n");
		printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
		retval = -ENODEV;
		goto out;
	}
	if (!check586(dev, size)) {
		printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
		retval = -ENODEV;
		goto out;
	}
#else
	if (dev->mem_start != 0) {
		/* user supplied the base address: try 16K first, then 8K */
		size = 0x4000;
		if (!check586(dev, size)) {
			size = 0x2000;
			if (!check586(dev, size)) {
				printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
				retval = -ENODEV;
				goto out;
			}
		}
	} else {
		/* scan the usual ISA shared-memory locations */
		static const unsigned long memaddrs[] = {
			0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
			0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
		};
		for (i = 0;; i++) {
			if (!memaddrs[i]) {
				printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
				retval = -ENODEV;
				goto out;
			}
			dev->mem_start = memaddrs[i];
			size = 0x2000;
			if (check586(dev, size))
				/* 8K board found */
				break;
			size = 0x4000;
			if (check586(dev, size))
				/* 16K board found */
				break;
		}
	}

	dev->mem_end = dev->mem_start + size;
#endif

	alloc586(dev);

	/* set the number of receive buffers according to the memory size */
	if (size == 0x2000)
		priv->num_recv_buffs = NUM_RECV_BUFFS_8;
	else
		priv->num_recv_buffs = NUM_RECV_BUFFS_16;

	printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
		dev->mem_start, size);

	if (dev->irq < 2) {
		/* autodetect the IRQ by triggering a chip reset/attention */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		ni_reset586();
		ni_attn586();

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			printk("?autoirq, Failed to detect IRQ line!\n");
			retval = -EAGAIN;
			iounmap(priv->mapped);
			goto out;
		}
		printk("IRQ %d (autodetected).\n", dev->irq);
	} else {
		if (dev->irq == 2)	/* IRQ 2 is cascaded to 9 on AT hardware */
			dev->irq = 9;
		printk("IRQ %d (assigned and not checked!).\n", dev->irq);
	}

	dev->netdev_ops = &ni52_netdev_ops;
	dev->watchdog_timeo = HZ/20;

	return 0;
out:
	release_region(ioaddr, NI52_TOTAL_SIZE);
	return retval;
}
584
585
586
587
588
589
/*
 * Full 82586 initialisation: run the CONFIGURE, IA-SETUP and TDR
 * commands, optionally a multicast-setup command, then lay out the NOP
 * commands, the receive frame area and the transmit command/buffer
 * structures in shared memory and start (and immediately ack) the
 * command unit.  Returns 0 on success, 1 on any command failure or if
 * the shared memory is too small.  The exact write/attention ordering
 * is mandated by the chip -- do not reorder.
 */
static int init586(struct net_device *dev)
{
	void __iomem *ptr;
	int i, result = 0;
	struct priv *p = netdev_priv(dev);
	struct configure_cmd_struct __iomem *cfg_cmd;
	struct iasetup_cmd_struct __iomem *ias_cmd;
	struct tdr_cmd_struct __iomem *tdr_cmd;
	struct mcsetup_cmd_struct __iomem *mc_cmd;
	struct dev_mc_list *dmi = dev->mc_list;
	int num_addrs = dev->mc_count;

	/* command blocks are built directly after the SCB */
	ptr = p->scb + 1;

	cfg_cmd = ptr;	/* configure-command */
	writew(0, &cfg_cmd->cmd_status);
	writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
	writew(0xFFFF, &cfg_cmd->cmd_link);

	/* number of configure bytes */
	writeb(0x0a, &cfg_cmd->byte_cnt);
	/* fifo-limit (tunable via the 'fifo' static) */
	writeb(fifo, &cfg_cmd->fifo);
	/* hold bad frames (sav_bf) off, address length etc. */
	writeb(0x40, &cfg_cmd->sav_bf);

	writeb(0x2e, &cfg_cmd->adr_len);
	writeb(0x00, &cfg_cmd->priority);
	writeb(0x60, &cfg_cmd->ifs);
	writeb(0x00, &cfg_cmd->time_low);
	writeb(0xf2, &cfg_cmd->time_high);
	writeb(0x00, &cfg_cmd->promisc);
	if (dev->flags & IFF_ALLMULTI) {
		/* 'len' = how many multicast entries fit before the ISCP */
		int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
		if (num_addrs > len) {
			printk(KERN_ERR "%s: switching to promisc. mode\n",
				dev->name);
			writeb(0x01, &cfg_cmd->promisc);
		}
	}
	if (dev->flags & IFF_PROMISC)
		writeb(0x01, &cfg_cmd->promisc);
	writeb(0x00, &cfg_cmd->carr_coll);
	writew(make16(cfg_cmd), &p->scb->cbl_offset);
	writeb(0, &p->scb->cmd_ruc);

	writeb(CUC_START, &p->scb->cmd_cuc);	/* cmd.-unit start */
	ni_attn586();

	wait_for_stat_compl(cfg_cmd);

	if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
							(STAT_COMPL|STAT_OK)) {
		printk(KERN_ERR "%s: configure command failed: %x\n",
			dev->name, readw(&cfg_cmd->cmd_status));
		return 1;
	}

	/* individual address setup: program the station address */
	ias_cmd = ptr;

	writew(0, &ias_cmd->cmd_status);
	writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
	writew(0xffff, &ias_cmd->cmd_link);

	memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);

	writew(make16(ias_cmd), &p->scb->cbl_offset);

	writeb(CUC_START, &p->scb->cmd_cuc);	/* cmd.-unit start */
	ni_attn586();

	wait_for_stat_compl(ias_cmd);

	if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
							(STAT_OK|STAT_COMPL)) {
		printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
		return 1;
	}

	/* TDR: time-domain reflectometer cable check (result only logged) */
	tdr_cmd = ptr;

	writew(0, &tdr_cmd->cmd_status);
	writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
	writew(0xffff, &tdr_cmd->cmd_link);
	writew(0, &tdr_cmd->status);

	writew(make16(tdr_cmd), &p->scb->cbl_offset);
	writeb(CUC_START, &p->scb->cmd_cuc);	/* cmd.-unit start */
	ni_attn586();

	wait_for_stat_compl(tdr_cmd);

	if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
		printk(KERN_ERR "%s: Problems while running the TDR.\n",
				dev->name);
	else {
		udelay(16);
		result = readw(&tdr_cmd->status);
		/* ack the TDR interrupt causes */
		writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
		ni_attn586();

		if (result & TDR_LNK_OK)
			;	/* cable is fine, nothing to report */
		else if (result & TDR_XCVR_PRB)
			printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
				dev->name);
		else if (result & TDR_ET_OPN)
			printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
				dev->name, result & TDR_TIMEMASK);
		else if (result & TDR_ET_SRT) {
			/* time == 0 -> strange, ignore */
			if (result & TDR_TIMEMASK)
				printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
					dev->name, result & TDR_TIMEMASK);
		} else
			printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
				dev->name, result);
	}

	/* multicast setup (skipped in promiscuous mode, where the filter
	 * is irrelevant) */
	if (num_addrs && !(dev->flags & IFF_PROMISC)) {
		mc_cmd = ptr;
		writew(0, &mc_cmd->cmd_status);
		writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
		writew(0xffff, &mc_cmd->cmd_link);
		writew(num_addrs * 6, &mc_cmd->mc_cnt);

		for (i = 0; i < num_addrs; i++, dmi = dmi->next)
			memcpy_toio(mc_cmd->mc_list[i],
							dmi->dmi_addr, 6);

		writew(make16(mc_cmd), &p->scb->cbl_offset);
		writeb(CUC_START, &p->scb->cmd_cuc);
		ni_attn586();

		wait_for_stat_compl(mc_cmd);

		if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
						!= (STAT_COMPL|STAT_OK))
			printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
	}

	/* allocate the NOP command blocks, each linked to itself so the
	 * CU idles on it until a transmit command is spliced in */
#if (NUM_XMIT_BUFFS == 1)
	for (i = 0; i < 2; i++) {
		p->nop_cmds[i] = ptr;
		writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
		writew(0, &p->nop_cmds[i]->cmd_status);
		writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
		ptr = ptr + sizeof(struct nop_cmd_struct);
	}
#else
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		p->nop_cmds[i] = ptr;
		writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
		writew(0, &p->nop_cmds[i]->cmd_status);
		writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
		ptr = ptr + sizeof(struct nop_cmd_struct);
	}
#endif

	ptr = alloc_rfa(dev, ptr);	/* init receive-frame-area */

	/* alloc xmit-buffs / init xmit_cmds */
	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
		/* transmit cmd/buff 0 */
		p->xmit_cmds[i] = ptr;
		ptr = ptr + sizeof(struct transmit_cmd_struct);
		p->xmit_cbuffs[i] = ptr;	/* char-buffs */
		ptr = ptr + XMIT_BUFF_SIZE;
		p->xmit_buffs[i] = ptr;		/* TBDs */
		ptr = ptr + sizeof(struct tbd_struct);
		if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
			printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
				dev->name);
			return 1;
		}
		memset_io(p->xmit_cmds[i], 0,
			sizeof(struct transmit_cmd_struct));
		memset_io(p->xmit_buffs[i], 0,
			sizeof(struct tbd_struct));
		writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
					&p->xmit_cmds[i]->cmd_link);
		writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
		writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
		writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
		writew(0xffff, &p->xmit_buffs[i]->next);
		writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
	}

	p->xmit_count = 0;
	p->xmit_last = 0;
#ifndef NO_NOPCOMMANDS
	p->nop_point = 0;
#endif

	/* 'start transmitter': with NOP commands the CU idles on the NOP
	 * loop, otherwise the first xmit command is pre-linked/suspended */
#ifndef NO_NOPCOMMANDS
	writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
	writeb(CUC_START, &p->scb->cmd_cuc);
	ni_attn586();
	wait_for_scb_cmd(dev);
#else
	writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
	writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
#endif

	/* ack any pending interrupt causes from the commands above */
	writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
	ni_attn586();
	udelay(16);

	ni_enaint();

	return 0;
}
824
825
826
827
828
829
/*
 * Build the receive frame area in shared memory starting at 'ptr':
 * a circular list of RFDs (with 'rfdadd' extras), followed by a
 * circular list of RBDs and the raw data buffers.  Also programs the
 * SCB's rfa_offset.  Returns the first free address after the buffers.
 */
static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
{
	struct rfd_struct __iomem *rfd = ptr;
	struct rbd_struct __iomem *rbd;
	int i;
	struct priv *p = netdev_priv(dev);

	memset_io(rfd, 0,
		sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
	p->rfd_first = rfd;

	for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
		writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
			&rfd[i].next);
		writew(0xffff, &rfd[i].rbd_offset);
	}
	/* RU suspends after handling the last descriptor */
	writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);

	ptr = rfd + (p->num_recv_buffs + rfdadd);

	rbd = ptr;
	ptr = rbd + p->num_recv_buffs;

	/* clear the RBDs, then link each to its successor and data buffer */
	memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));

	for (i = 0; i < p->num_recv_buffs; i++) {
		writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
		writew(RECV_BUFF_SIZE, &rbd[i].size);
		writel(make24(ptr), &rbd[i].buffer);
		ptr = ptr + RECV_BUFF_SIZE;
	}
	p->rfd_top = p->rfd_first;
	p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);

	writew(make16(p->rfd_first), &p->scb->rfa_offset);
	writew(make16(rbd), &p->rfd_first->rbd_offset);

	return ptr;
}
871
872
873
874
875
876
/*
 * Interrupt handler: acknowledge and dispatch every pending cause in
 * scb->cus until none remain -- frame received (STAT_FR), receiver not
 * ready (STAT_RNR), command executed (STAT_CX) and, with NOP commands,
 * CU-not-active (STAT_CNA).  Always returns IRQ_HANDLED.
 */
static irqreturn_t ni52_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned int stat;
	int cnt = 0;
	struct priv *p;

	p = netdev_priv(dev);

	if (debuglevel > 1)
		printk("I");

	spin_lock(&p->spinlock);

	wait_for_scb_cmd(dev);

	while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
		/* acknowledge the causes first */
		writeb(stat, &p->scb->cmd_cuc);
		ni_attn586();

		if (stat & STAT_FR)	/* received a frame */
			ni52_rcv_int(dev);

		if (stat & STAT_RNR) {	/* receive unit went not-ready */
			printk("(R)");
			if (readb(&p->scb->rus) & RU_SUSPEND) {
				/* RU only suspended: try a resume */
				wait_for_scb_cmd(dev);
				writeb(RUC_RESUME, &p->scb->cmd_ruc);
				ni_attn586();
				wait_for_scb_cmd_ruc(dev);
			} else {
				printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
					dev->name, stat, readb(&p->scb->rus));
				ni52_rnr_int(dev);
			}
		}

		/* a command with the interrupt bit set finished */
		if (stat & STAT_CX)
			ni52_xmt_int(dev);

#ifndef NO_NOPCOMMANDS
		if (stat & STAT_CNA) {	/* CU went non-active: shouldn't happen */
			if (netif_running(dev))
				printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
					dev->name, stat, readb(&p->scb->cus));
		}
#endif

		if (debuglevel > 1)
			printk("%d", cnt++);

		/* the chip must ack the interrupt before we poll cus again */
		wait_for_scb_cmd(dev);
		if (readb(&p->scb->cmd_cuc)) {
			printk(KERN_ERR "%s: Acknowledge timed out.\n",
				dev->name);
			ni_disint();
			break;
		}
	}
	spin_unlock(&p->spinlock);

	if (debuglevel > 1)
		printk("i");
	return IRQ_HANDLED;
}
945
946
947
948
949
/*
 * Receive handler: walk the RFD ring from rfd_top, passing every
 * completed good single-RBD frame up the stack; frames spanning
 * multiple RBDs are counted as oversized and dropped.  Each processed
 * RFD is recycled as the new ring tail (RFD_SUSP moves with it).
 */
static void ni52_rcv_int(struct net_device *dev)
{
	int status, cnt = 0;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct __iomem *rbd;
	struct priv *p = netdev_priv(dev);

	if (debuglevel > 0)
		printk("R");

	for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
		rbd = make32(readw(&p->rfd_top->rbd_offset));
		if (status & RFD_OK) {	/* frame received without errors? */
			totlen = readw(&rbd->status);
			if (totlen & RBD_LAST) {
				/* whole frame in one buffer */
				totlen &= RBD_MASK;	/* length of received data */
				writew(0x00, &rbd->status);
				skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
				if (skb != NULL) {
					skb_reserve(skb, 2);	/* word-align the IP header */
					skb_put(skb, totlen);
					memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					p->stats.rx_packets++;
					p->stats.rx_bytes += totlen;
				} else
					p->stats.rx_dropped++;
			} else {
				/* frame spilled over several RBDs: clear
				 * them all, then drop it as oversized */
				int rstat;

				totlen = 0;
				while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
					totlen += rstat & RBD_MASK;
					if (!rstat) {
						printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
						break;
					}
					writew(0, &rbd->status);
					rbd = make32(readw(&rbd->next));
				}
				totlen += rstat & RBD_MASK;
				writew(0, &rbd->status);
				printk(KERN_ERR "%s: received oversized frame! length: %d\n",
					dev->name, totlen);
				p->stats.rx_dropped++;
			}
		} else {	/* frame !(ok); only seen with save-bad-frames */
			printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
				dev->name, status);
			p->stats.rx_errors++;
		}
		/* recycle this RFD as the new ring tail */
		writeb(0, &p->rfd_top->stat_high);
		writeb(RFD_SUSP, &p->rfd_top->last);	/* maybe exchange by RFD_LAST */
		writew(0xffff, &p->rfd_top->rbd_offset);
		writeb(0, &p->rfd_last->last);		/* delete RFD_SUSP */
		p->rfd_last = p->rfd_top;
		p->rfd_top = make32(readw(&p->rfd_top->next));	/* step forward */
		writew(make16(p->rfd_top), &p->scb->rfa_offset);

		if (debuglevel > 0)
			printk("%d", cnt++);
	}

	if (automatic_resume) {
		wait_for_scb_cmd(dev);
		writeb(RUC_RESUME, &p->scb->cmd_ruc);
		ni_attn586();
		wait_for_scb_cmd_ruc(dev);
	}

#ifdef WAIT_4_BUSY
	{
		int i;
		for (i = 0; i < 1024; i++) {
			if (p->rfd_top->status)
				break;
			udelay(16);
			if (i == 1023)
				printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
		}
	}
#endif
	if (debuglevel > 0)
		printk("r");
}
1038
1039
1040
1041
1042
/*
 * Receive-unit-not-ready recovery: abort the RU, rebuild the entire
 * receive frame area in place and restart reception.
 */
static void ni52_rnr_int(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	p->stats.rx_errors++;

	wait_for_scb_cmd(dev);		/* wait for the last command to be accepted */
	writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually RU is in 'no resource' state */
	ni_attn586();
	wait_for_scb_cmd_ruc(dev);	/* wait for accept cmd. */

	alloc_rfa(dev, p->rfd_first);	/* rebuild RFDs/RBDs at the same place */

	startrecv586(dev);		/* restart the receive unit */

	printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
		dev->name, readb(&p->scb->rus));

}
1062
1063
1064
1065
1066
/*
 * Transmit-complete handler: read the finished command's status, update
 * the counters (logging the specific error cause on failure), advance
 * xmit_last in multi-buffer configurations and wake the queue.
 */
static void ni52_xmt_int(struct net_device *dev)
{
	int status;
	struct priv *p = netdev_priv(dev);

	if (debuglevel > 0)
		printk("X");

	status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
	if (!(status & STAT_COMPL))
		printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);

	if (status & STAT_OK) {
		p->stats.tx_packets++;
		/* low bits carry the (successful) retry count */
		p->stats.collisions += (status & TCMD_MAXCOLLMASK);
	} else {
		p->stats.tx_errors++;
		if (status & TCMD_LATECOLL) {
			printk(KERN_ERR "%s: late collision detected.\n",
				dev->name);
			p->stats.collisions++;
		} else if (status & TCMD_NOCARRIER) {
			p->stats.tx_carrier_errors++;
			printk(KERN_ERR "%s: no carrier detected.\n",
				dev->name);
		} else if (status & TCMD_LOSTCTS)
			printk(KERN_ERR "%s: loss of CTS detected.\n",
				dev->name);
		else if (status & TCMD_UNDERRUN) {
			p->stats.tx_fifo_errors++;
			printk(KERN_ERR "%s: DMA underrun detected.\n",
				dev->name);
		} else if (status & TCMD_MAXCOLL) {
			printk(KERN_ERR "%s: Max. collisions exceeded.\n",
				dev->name);
			p->stats.collisions += 16;
		}
	}
#if (NUM_XMIT_BUFFS > 1)
	if ((++p->xmit_last) == NUM_XMIT_BUFFS)
		p->xmit_last = 0;
#endif
	netif_wake_queue(dev);
}
1111
1112
1113
1114
1115
/*
 * Start the receive unit: point the SCB at the first RFD and issue
 * RUC_START, waiting for both command fields to be free before and
 * for acceptance afterwards.
 */
static void startrecv586(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);

	wait_for_scb_cmd(dev);
	wait_for_scb_cmd_ruc(dev);
	writew(make16(p->rfd_first), &p->scb->rfa_offset);
	writeb(RUC_START, &p->scb->cmd_ruc);	/* RU start */
	ni_attn586();
	wait_for_scb_cmd_ruc(dev);

}
1128
1129static void ni52_timeout(struct net_device *dev)
1130{
1131 struct priv *p = netdev_priv(dev);
1132#ifndef NO_NOPCOMMANDS
1133 if (readb(&p->scb->cus) & CU_ACTIVE) {
1134 netif_wake_queue(dev);
1135#ifdef DEBUG
1136 printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
1137 dev->name);
1138 printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
1139 dev->name, (int)p->xmit_cmds[0]->cmd_status,
1140 readw(&p->nop_cmds[0]->cmd_status),
1141 readw(&p->nop_cmds[1]->cmd_status),
1142 p->nop_point);
1143#endif
1144 writeb(CUC_ABORT, &p->scb->cmd_cuc);
1145 ni_attn586();
1146 wait_for_scb_cmd(dev);
1147 writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
1148 writeb(CUC_START, &p->scb->cmd_cuc);
1149 ni_attn586();
1150 wait_for_scb_cmd(dev);
1151 dev->trans_start = jiffies;
1152 return 0;
1153 }
1154#endif
1155 {
1156#ifdef DEBUG
1157 printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
1158 dev->name, readb(&p->scb->cus));
1159 printk(KERN_ERR "%s: command-stats: %04x %04x\n",
1160 dev->name,
1161 readw(&p->xmit_cmds[0]->cmd_status),
1162 readw(&p->xmit_cmds[1]->cmd_status));
1163 printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
1164 dev->name);
1165#endif
1166 ni52_close(dev);
1167 ni52_open(dev);
1168 }
1169 dev->trans_start = jiffies;
1170}
1171
1172
1173
1174
1175
1176static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
1177 struct net_device *dev)
1178{
1179 int len, i;
1180#ifndef NO_NOPCOMMANDS
1181 int next_nop;
1182#endif
1183 struct priv *p = netdev_priv(dev);
1184
1185 if (skb->len > XMIT_BUFF_SIZE) {
1186 printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
1187 return NETDEV_TX_OK;
1188 }
1189
1190 netif_stop_queue(dev);
1191
1192 memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
1193 len = skb->len;
1194 if (len < ETH_ZLEN) {
1195 len = ETH_ZLEN;
1196 memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
1197 len - skb->len);
1198 }
1199
1200#if (NUM_XMIT_BUFFS == 1)
1201# ifdef NO_NOPCOMMANDS
1202
1203#ifdef DEBUG
1204 if (readb(&p->scb->cus) & CU_ACTIVE) {
1205 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
1206 printk(KERN_ERR "%s: stat: %04x %04x\n",
1207 dev->name, readb(&p->scb->cus),
1208 readw(&p->xmit_cmds[0]->cmd_status));
1209 }
1210#endif
1211 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1212 for (i = 0; i < 16; i++) {
1213 writew(0, &p->xmit_cmds[0]->cmd_status);
1214 wait_for_scb_cmd(dev);
1215 if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
1216 writeb(CUC_RESUME, &p->scb->cmd_cuc);
1217 else {
1218 writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
1219 writeb(CUC_START, &p->scb->cmd_cuc);
1220 }
1221 ni_attn586();
1222 dev->trans_start = jiffies;
1223 if (!i)
1224 dev_kfree_skb(skb);
1225 wait_for_scb_cmd(dev);
1226
1227 if (readb(&p->scb->cus) & CU_ACTIVE)
1228 break;
1229 if (readw(&p->xmit_cmds[0]->cmd_status))
1230 break;
1231 if (i == 15)
1232 printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
1233 }
1234# else
1235 next_nop = (p->nop_point + 1) & 0x1;
1236 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1237 writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
1238 writew(make16(p->nop_cmds[next_nop]),
1239 &p->nop_cmds[next_nop]->cmd_link);
1240 writew(0, &p->xmit_cmds[0]->cmd_status);
1241 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1242
1243 writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
1244 dev->trans_start = jiffies;
1245 p->nop_point = next_nop;
1246 dev_kfree_skb(skb);
1247# endif
1248#else
1249 writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
1250 next_nop = p->xmit_count + 1
1251 if (next_nop == NUM_XMIT_BUFFS)
1252 next_nop = 0;
1253 writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
1254
1255 writew(make16(p->nop_cmds[next_nop]),
1256 &p->nop_cmds[next_nop]->cmd_link);
1257 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1258 writew(make16(p->xmit_cmds[p->xmit_count]),
1259 &p->nop_cmds[p->xmit_count]->cmd_link);
1260 dev->trans_start = jiffies;
1261 p->xmit_count = next_nop;
1262 {
1263 unsigned long flags;
1264 spin_lock_irqsave(&p->spinlock);
1265 if (p->xmit_count != p->xmit_last)
1266 netif_wake_queue(dev);
1267 spin_unlock_irqrestore(&p->spinlock);
1268 }
1269 dev_kfree_skb(skb);
1270#endif
1271 return NETDEV_TX_OK;
1272}
1273
1274
1275
1276
1277
1278static struct net_device_stats *ni52_get_stats(struct net_device *dev)
1279{
1280 struct priv *p = netdev_priv(dev);
1281 unsigned short crc, aln, rsc, ovrn;
1282
1283
1284 crc = readw(&p->scb->crc_errs);
1285 writew(0, &p->scb->crc_errs);
1286 aln = readw(&p->scb->aln_errs);
1287 writew(0, &p->scb->aln_errs);
1288 rsc = readw(&p->scb->rsc_errs);
1289 writew(0, &p->scb->rsc_errs);
1290 ovrn = readw(&p->scb->ovrn_errs);
1291 writew(0, &p->scb->ovrn_errs);
1292
1293 p->stats.rx_crc_errors += crc;
1294 p->stats.rx_fifo_errors += ovrn;
1295 p->stats.rx_frame_errors += aln;
1296 p->stats.rx_dropped += rsc;
1297
1298 return &p->stats;
1299}
1300
1301
1302
1303
1304
/*
 * ndo_set_multicast_list: the multicast filter and promisc bits are
 * only programmable via the configure/MC-setup commands, so the whole
 * chip is re-initialised (init586 re-reads dev->mc_list/dev->flags)
 * and the receiver restarted.
 */
static void set_multicast_list(struct net_device *dev)
{
	netif_stop_queue(dev);
	ni_disint();
	alloc586(dev);
	init586(dev);
	startrecv586(dev);
	ni_enaint();
	netif_wake_queue(dev);
}
1315
#ifdef MODULE
static struct net_device *dev_ni52;

/* all four parameters are required when built as a module; autoprobing
 * is refused in init_module() */
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(memstart, long, 0);
module_param(memend, long, 0);
MODULE_PARM_DESC(io, "NI5210 I/O base address,required");
MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
MODULE_PARM_DESC(memend, "NI5210 memory end address,required");

/* validate the parameters and probe the single supported board */
int __init init_module(void)
{
	if (io <= 0x0 || !memend || !memstart || irq < 2) {
		printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
		printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
		return -ENODEV;
	}
	dev_ni52 = ni52_probe(-1);
	if (IS_ERR(dev_ni52))
		return PTR_ERR(dev_ni52);
	return 0;
}

/* undo init_module: unregister, unmap the shared-memory window,
 * release the I/O region and free the net_device */
void __exit cleanup_module(void)
{
	struct priv *p = netdev_priv(dev_ni52);
	unregister_netdev(dev_ni52);
	iounmap(p->mapped);
	release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
	free_netdev(dev_ni52);
}
#endif
1350
1351MODULE_LICENSE("GPL");
1352