/*
 * lp486e.c: Intel Panther onboard i82596 Ethernet driver.
 *
 * Author: Ard van Breemen <ard@cstmel.nl.eu.org>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#define DRV_NAME "lp486e"

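/* Bits that can be set in i596_debug to enable extra logging. */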
#define LOG_SRCDST 0x80000000
#define LOG_STATINT 0x40000000
#define LOG_STARTINT 0x20000000

#define i596_debug debug

static int i596_debug = 0;

static const char * const medianame[] = {
	"10baseT", "AUI",
	"10baseT-FD", "AUI-FD",
};

#define LP486E_TOTAL_SIZE 16

#define I596_NULL (0xffffffff)

#define CMD_EOL 0x8000
#define CMD_SUSP 0x4000
#define CMD_INTR 0x2000

#define CMD_FLEX 0x0008

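/* i82596 Command Unit (CU) action command opcodes. */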
enum commands {
	CmdNOP = 0,
	CmdIASetup = 1,
	CmdConfigure = 2,
	CmdMulticastList = 3,
	CmdTx = 4,
	CmdTDR = 5,
	CmdDump = 6,
	CmdDiagnose = 7
};

#if 0
static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
				     "Tx", "TDR", "Dump", "Diagnose" };
#endif

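/*
 * SCB status word bits.  STAT_ACK is the set of interrupt bits that must
 * be acknowledged back to the chip.
 */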
#define STAT_CX 0x8000
#define STAT_FR 0x4000
#define STAT_CNA 0x2000
#define STAT_RNR 0x1000
#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
#define STAT_CUS 0x0700
#define STAT_RUS 0x00f0
#define STAT_T 0x0008
#define STAT_ZERO 0x0807

#if 0
static char *CUstates[8] = {
	"idle", "suspended", "active", 0, 0, 0, 0, 0
};
static char *RUstates[16] = {
	"idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
	0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
};

static void
i596_out_status(int status) {
	int bad = 0;
	char *s;

	printk("status %4.4x:", status);
	if (status == 0xffff)
		printk(" strange..\n");
	else {
		if (status & STAT_CX)
			printk(" CU done");
		if (status & STAT_CNA)
			printk(" CU stopped");
		if (status & STAT_FR)
			printk(" got a frame");
		if (status & STAT_RNR)
			printk(" RU stopped");
		if (status & STAT_T)
			printk(" throttled");
		if (status & STAT_ZERO)
			bad = 1;
		s = CUstates[(status & STAT_CUS) >> 8];
		if (!s)
			bad = 1;
		else
			printk(" CU(%s)", s);
		s = RUstates[(status & STAT_RUS) >> 4];
		if (!s)
			bad = 1;
		else
			printk(" RU(%s)", s);
		if (bad)
			printk(" bad status");
		printk("\n");
	}
}
#endif

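/* SCB command word: interrupt acknowledge bits and CU/RU control commands. */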
#define ACK_CX 0x8000
#define ACK_FR 0x4000
#define ACK_CNA 0x2000
#define ACK_RNR 0x1000

#define CUC_START 0x0100
#define CUC_RESUME 0x0200
#define CUC_SUSPEND 0x0300
#define CUC_ABORT 0x0400

#define RX_START 0x0010
#define RX_RESUME 0x0020
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040

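/*
 * The i82596 sees 32-bit bus addresses.  These helpers convert between
 * kernel virtual addresses and bus addresses, with I596_NULL standing in
 * for an empty link.
 */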
typedef u32 phys_addr;

static inline phys_addr
va_to_pa(void *x) {
	return x ? virt_to_bus(x) : I596_NULL;
}

static inline void *
pa_to_va(phys_addr x) {
	return (x == I596_NULL) ? NULL : bus_to_virt(x);
}

#define CMD_STAT_C 0x8000
#define CMD_STAT_B 0x4000
#define CMD_STAT_OK 0x2000
#define CMD_STAT_A 0x1000

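/*
 * Action command block header: completion status, command word and the
 * bus address of the next command on the CU list.
 */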
struct i596_cmd {
	unsigned short status;
	unsigned short command;
	phys_addr pa_next;
};

#define EOF 0x8000
#define SIZE_MASK 0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	phys_addr pa_next;
	phys_addr pa_data;
	struct sk_buff *skb;
};

struct tx_cmd {
	struct i596_cmd cmd;
	phys_addr pa_tbd;
	unsigned short size;
	unsigned short pad;
};

#define RFD_STAT_C 0x8000
#define RFD_STAT_B 0x4000
#define RFD_STAT_OK 0x2000
#define RFD_STATUS 0x1fff
#define RFD_LENGTH_ERR 0x1000
#define RFD_CRC_ERR 0x0800
#define RFD_ALIGN_ERR 0x0400
#define RFD_NOBUFS_ERR 0x0200
#define RFD_DMA_ERR 0x0100
#define RFD_SHORT_FRAME_ERR 0x0080
#define RFD_NOEOP_ERR 0x0040
#define RFD_TRUNC_ERR 0x0020
#define RFD_MULTICAST 0x0002
#define RFD_COLLISION 0x0001

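/*
 * Receive frame descriptor.  The frame data buffer is embedded in the
 * descriptor itself (simplified memory model), so no separate receive
 * buffer descriptors are needed.
 */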
struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	phys_addr pa_next;
	phys_addr pa_rbd;
	unsigned short count;
	unsigned short size;
	char data[1532];
};

#define RBD_EL 0x8000
#define RBD_P 0x4000
#define RBD_SIZEMASK 0x3fff
#define RBD_EOF 0x8000
#define RBD_F 0x4000

struct i596_rbd {
	unsigned short size;
	unsigned short pad;
	phys_addr pa_next;
	phys_addr pa_data;
	phys_addr pa_prev;
	struct sk_buff *skb;
};

#define RX_RING_SIZE 64
#define RX_SKBSIZE (ETH_FRAME_LEN+10)
#define RX_RBD_SIZE 32

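/* System Control Block: the mailbox shared between the driver and the i82596. */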
struct i596_scb {
	u16 status;
	u16 command;
	phys_addr pa_cmd;
	phys_addr pa_rfd;
	u32 crc_err;
	u32 align_err;
	u32 resource_err;
	u32 over_err;
	u32 rcvdt_err;
	u32 short_err;
	u16 t_on;
	u16 t_off;
};

struct i596_iscp {
	u32 busy;
	phys_addr pa_scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	phys_addr pa_iscp;
};

struct i596_dump {
	u16 dump[153];
};

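/* Per-device private state; the structures shared with the chip live at the front. */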
struct i596_private {
	struct i596_scp scp;
	struct i596_iscp iscp;
	struct i596_scb scb;
	u32 dummy;
	struct i596_dump dump;

	struct i596_cmd set_add;
	char eth_addr[8];

	struct i596_cmd set_conf;
	char i596_config[16];

	struct i596_cmd tdr;
	unsigned long tdr_stat;

	int last_restart;
	struct i596_rbd *rbd_list;
	struct i596_rbd *rbd_tail;
	struct i596_rfd *rx_tail;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	spinlock_t cmd_lock;
};

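/* Default parameter bytes for the i82596 CONFIGURE command. */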
static char init_setup[14] = {
	0x8E,
	0xC8,
	0x40,
	0x2E,
	0x00,
	0x60,
	0x00,
	0xf2,
	0x00,
	0x00,
	0x40,
	0xff,
	0x00,
	0x7f
};

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void print_eth(char *);
static void set_multicast_list(struct net_device *dev);
static void i596_tx_timeout(struct net_device *dev);

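/*
 * Bounded busy-wait for the chip to consume the previous SCB command.
 * Returns nonzero and logs a message if it times out.
 */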
static int
i596_timeout(struct net_device *dev, char *msg, int ct) {
	struct i596_private *lp;
	int boguscnt = ct;

	lp = netdev_priv(dev);
	while (lp->scb.command) {
		if (--boguscnt == 0) {
			printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
			       dev->name, msg,
			       lp->scb.status, lp->scb.command);
			return 1;
		}
		udelay(5);
		barrier();
	}
	return 0;
}

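/*
 * Allocate up to num receive frame descriptors and link them into a
 * circular list headed by scb.pa_rfd.  Returns the number actually
 * allocated.
 */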
static inline int
init_rx_bufs(struct net_device *dev, int num) {
	struct i596_private *lp;
	struct i596_rfd *rfd;
	int i;

	lp = netdev_priv(dev);
	lp->scb.pa_rfd = I596_NULL;

	for (i = 0; i < num; i++) {
		rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
		if (rfd == NULL)
			break;

		rfd->stat = 0;
		rfd->pa_rbd = I596_NULL;
		rfd->count = 0;
		rfd->size = 1532;
		if (i == 0) {
			rfd->cmd = CMD_EOL;
			lp->rx_tail = rfd;
		} else {
			rfd->cmd = 0;
		}
		rfd->pa_next = lp->scb.pa_rfd;
		lp->scb.pa_rfd = va_to_pa(rfd);
		lp->rx_tail->pa_next = lp->scb.pa_rfd;
	}

#if 0
	for (i = 0; i < RX_RBD_SIZE; i++) {
		rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
		if (rbd) {
			rbd->pad = 0;
			rbd->count = 0;
			rbd->skb = dev_alloc_skb(RX_SKBSIZE);
			if (!rbd->skb) {
				printk("dev_alloc_skb failed");
			}
			rbd->next = rfd->rbd;
			if (i) {
				rfd->rbd->prev = rbd;
				rbd->size = RX_SKBSIZE;
			} else {
				rbd->size = (RX_SKBSIZE | RBD_EL);
				lp->rbd_tail = rbd;
			}

			rfd->rbd = rbd;
		}
	}
	lp->rbd_tail->next = rfd->rbd;
#endif
	return i;
}

static inline void
remove_rx_bufs(struct net_device *dev) {
	struct i596_private *lp;
	struct i596_rfd *rfd;

	lp = netdev_priv(dev);
	lp->rx_tail->pa_next = I596_NULL;

	do {
		rfd = pa_to_va(lp->scb.pa_rfd);
		lp->scb.pa_rfd = rfd->pa_next;
		kfree(rfd);
	} while (rfd != lp->rx_tail);

	lp->rx_tail = NULL;

#if 0
	for (lp->rbd_list) {
	}
#endif
}

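/*
 * PORT interface: the board decodes two 16-bit writes at IOADDR as a
 * 32-bit value whose low four bits select the PORT command, so the
 * address passed in must be 16-byte aligned.  CA() raises channel
 * attention; CLEAR_INT() clears the board's interrupt latch.
 */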
#define PORT_RESET 0x00
#define PORT_SELFTEST 0x01
#define PORT_ALTSCP 0x02
#define PORT_DUMP 0x03

#define IOADDR 0xcb0
#define IRQ 10

static inline void
PORT(phys_addr a, unsigned int cmd) {
	if (a & 0xf)
		printk("lp486e.c: PORT: address not aligned\n");
	outw(((a & 0xffff) | cmd), IOADDR);
	outw(((a >> 16) & 0xffff), IOADDR+2);
}

static inline void
CA(void) {
	outb(0, IOADDR+4);
	udelay(8);
}

static inline void
CLEAR_INT(void) {
	outb(0, IOADDR+8);
}

#if 0
static void
i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
	struct i596_private *lp = netdev_priv(dev);
	u16 *outp;
	int i, m;

	memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
	outp = &(lp->dump.dump[0]);

	PORT(va_to_pa(outp), portcmd);
	mdelay(30);

	printk("lp486e i82596 %s result:\n", cmdname);
	for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
		;
	for (i = 0; i < m; i++) {
		printk(" %04x", lp->dump.dump[i]);
		if (i%8 == 7)
			printk("\n");
	}
	printk("\n");
}
#endif

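/*
 * Reset the chip, point it at the SCP/ISCP/SCB chain and wait for it to
 * clear the ISCP busy flag as acknowledgement.
 */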
static int
i596_scp_setup(struct net_device *dev) {
	struct i596_private *lp = netdev_priv(dev);
	int boguscnt;

	lp->scp.sysbus = 0x00440000;
	lp->scp.pad = 0;
	lp->scp.pa_iscp = va_to_pa(&(lp->iscp));

	lp->iscp.busy = 0x0001;
	lp->iscp.pa_scb = va_to_pa(&(lp->scb));

	lp->scb.command = 0;
	lp->scb.status = 0;
	lp->scb.pa_cmd = I596_NULL;

	lp->last_cmd = jiffies;
	lp->cmd_backlog = 0;
	lp->cmd_head = NULL;

	PORT(0, PORT_RESET);
	udelay(100);

	PORT(va_to_pa(&lp->scp), PORT_ALTSCP);

	CA();

	boguscnt = 100;
	while (lp->iscp.busy) {
		if (--boguscnt == 0) {
			printk("%s: i82596 initialization timed out\n",
			       dev->name);
			return 1;
		}
		udelay(5);
		barrier();
	}

	return 0;
}

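/*
 * Bring the chip up: queue the initial Configure, IA-Setup and TDR
 * commands, then start the Receive Unit.
 */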
static int
init_i596(struct net_device *dev) {
	struct i596_private *lp;

	if (i596_scp_setup(dev))
		return 1;

	lp = netdev_priv(dev);
	lp->scb.command = 0;

	memcpy((void *)lp->i596_config, init_setup, 14);
	lp->set_conf.command = CmdConfigure;
	i596_add_cmd(dev, (void *)&lp->set_conf);

	memcpy((void *)lp->eth_addr, dev->dev_addr, 6);
	lp->set_add.command = CmdIASetup;
	i596_add_cmd(dev, &lp->set_add);

	lp->tdr.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr);

	if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
		return 1;

	lp->scb.command = RX_START;
	CA();

	barrier();

	if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
		return 1;

	return 0;
}

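/*
 * Handle one completed receive frame descriptor: pass a good frame up
 * the stack, account errors otherwise.  Returns nonzero on an skb
 * allocation failure so the caller can stop processing.
 */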
static inline int
i596_rx_one(struct net_device *dev, struct i596_private *lp,
	    struct i596_rfd *rfd, int *frames) {

	if (rfd->stat & RFD_STAT_OK) {
		int pkt_len = (rfd->count & 0x3fff);
		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);

		(*frames)++;

		if (rfd->cmd & CMD_EOL)
			printk("Received on EOL\n");

		if (skb == NULL) {
			printk("%s: i596_rx Memory squeeze, "
			       "dropping packet.\n", dev->name);
			dev->stats.rx_dropped++;
			return 1;
		}

		memcpy(skb_put(skb, pkt_len), rfd->data, pkt_len);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
	} else {
#if 0
		printk("Frame reception error status %04x\n",
		       rfd->stat);
#endif
		dev->stats.rx_errors++;
		if (rfd->stat & RFD_COLLISION)
			dev->stats.collisions++;
		if (rfd->stat & RFD_SHORT_FRAME_ERR)
			dev->stats.rx_length_errors++;
		if (rfd->stat & RFD_DMA_ERR)
			dev->stats.rx_over_errors++;
		if (rfd->stat & RFD_NOBUFS_ERR)
			dev->stats.rx_fifo_errors++;
		if (rfd->stat & RFD_ALIGN_ERR)
			dev->stats.rx_frame_errors++;
		if (rfd->stat & RFD_CRC_ERR)
			dev->stats.rx_crc_errors++;
		if (rfd->stat & RFD_LENGTH_ERR)
			dev->stats.rx_length_errors++;
	}
	rfd->stat = rfd->count = 0;
	return 0;
}

static int
i596_rx(struct net_device *dev) {
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	int frames = 0;

	while (1) {
		rfd = pa_to_va(lp->scb.pa_rfd);
		if (!rfd) {
			printk(KERN_ERR "i596_rx: NULL rfd?\n");
			return 0;
		}
#if 1
		if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
			printk("SF:%p-%04x\n", rfd, rfd->stat);
#endif
		if (!(rfd->stat & RFD_STAT_C))
			break;
		if (i596_rx_one(dev, lp, rfd, &frames))
			break;
		rfd->cmd = CMD_EOL;
		lp->rx_tail->cmd = 0;
		lp->rx_tail = rfd;
		lp->scb.pa_rfd = rfd->pa_next;
		barrier();
	}

	return frames;
}

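/*
 * Tear down any commands still queued on the CU list, freeing the
 * transmit buffers and command blocks they reference.
 */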
static void
i596_cleanup_cmd(struct net_device *dev) {
	struct i596_private *lp;
	struct i596_cmd *cmd;

	lp = netdev_priv(dev);
	while (lp->cmd_head) {
		cmd = lp->cmd_head;

		lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
		lp->cmd_backlog--;

		switch ((cmd->command) & 0x7) {
		case CmdTx: {
			struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
			struct i596_tbd *tx_cmd_tbd;
			tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);

			dev_kfree_skb_any(tx_cmd_tbd->skb);

			dev->stats.tx_errors++;
			dev->stats.tx_aborted_errors++;

			cmd->pa_next = I596_NULL;
			kfree((unsigned char *)tx_cmd);
			netif_wake_queue(dev);
			break;
		}
		case CmdMulticastList: {
			cmd->pa_next = I596_NULL;
			kfree((unsigned char *)cmd);
			break;
		}
		default: {
			cmd->pa_next = I596_NULL;
			break;
		}
		}
		barrier();
	}

	if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
		;

	lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
}

static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {

	if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
		;

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA();
	barrier();

	if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
		;

	i596_cleanup_cmd(dev);
	i596_rx(dev);

	netif_start_queue(dev);

	init_i596(dev);
}

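/*
 * Append a command to the CU command list.  If the list was empty the
 * CU is (re)started; if the backlog gets too deep the chip is reset.
 */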
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
	struct i596_private *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->pa_next = I596_NULL;

	spin_lock_irqsave(&lp->cmd_lock, flags);

	if (lp->cmd_head) {
		lp->cmd_tail->pa_next = va_to_pa(cmd);
	} else {
		lp->cmd_head = cmd;
		if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
			;
		lp->scb.pa_cmd = va_to_pa(cmd);
		lp->scb.command = CUC_START;
		CA();
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
	spin_unlock_irqrestore(&lp->cmd_lock, flags);

	if (lp->cmd_backlog > 16) {
		int tickssofar = jiffies - lp->last_cmd;
		if (tickssofar < HZ/4)
			return;

		printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int i;

	i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
	if (i) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return i;
	}

	if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
		printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);

	if (i < 4) {
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}
	netif_start_queue(dev);
	init_i596(dev);
	return 0;
}

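/*
 * Build a CmdTx command plus transmit buffer descriptor for the skb and
 * queue it on the CU list.  The skb is freed once the command completes.
 */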
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) {
	struct tx_cmd *tx_cmd;
	short length;

	length = skb->len;

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	tx_cmd = kmalloc((sizeof(struct tx_cmd) + sizeof(struct i596_tbd)), GFP_ATOMIC);
	if (tx_cmd == NULL) {
		printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	} else {
		struct i596_tbd *tx_cmd_tbd;
		tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
		tx_cmd->pa_tbd = va_to_pa(tx_cmd_tbd);
		tx_cmd_tbd->pa_next = I596_NULL;

		tx_cmd->cmd.command = (CMD_FLEX | CmdTx);

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tx_cmd_tbd->pad = 0;
		tx_cmd_tbd->size = (EOF | length);

		tx_cmd_tbd->pa_data = va_to_pa(skb->data);
		tx_cmd_tbd->skb = skb;

		if (i596_debug & LOG_SRCDST)
			print_eth(skb->data);

		i596_add_cmd(dev, (struct i596_cmd *) tx_cmd);

		dev->stats.tx_packets++;
	}

	return NETDEV_TX_OK;
}

static void
i596_tx_timeout(struct net_device *dev) {
	struct i596_private *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
	dev->stats.tx_errors++;

	if (lp->last_restart == dev->stats.tx_packets) {
		printk("Resetting board.\n");
		i596_reset(dev, lp, ioaddr);
	} else {
		printk("Kicking board.\n");
		lp->scb.command = (CUC_START | RX_START);
		CA();
		lp->last_restart = dev->stats.tx_packets;
	}
	netif_wake_queue(dev);
}

static void print_eth(char *add)
{
	int i;

	printk("Dest ");
	for (i = 0; i < 6; i++)
		printk(" %2.2X", (unsigned char) add[i]);
	printk("\n");

	printk("Source");
	for (i = 0; i < 6; i++)
		printk(" %2.2X", (unsigned char) add[i+6]);
	printk("\n");

	printk("type %2.2X%2.2X\n",
	       (unsigned char) add[12], (unsigned char) add[13]);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

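/*
 * Probe for the onboard i82596: claim the I/O region, reset and set up
 * the chip, and try to pick up the station address from the BIOS area
 * at 0xe8000.
 */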
static int __init lp486e_probe(struct net_device *dev) {
	struct i596_private *lp;
	unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
	unsigned char *bios;
	int i, j;
	int ret = -ENOMEM;
	static int probed;

	if (probed)
		return -ENODEV;
	probed++;

	if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
		printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
		return -EBUSY;
	}

	lp = netdev_priv(dev);
	spin_lock_init(&lp->cmd_lock);

	if (i596_scp_setup(dev)) {
		ret = -ENODEV;
		goto err_out_kfree;
	}

	dev->base_addr = IOADDR;
	dev->irq = IRQ;

	bios = bus_to_virt(0xe8000);
	for (j = 0; j < 0x2000; j++) {
		if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
			printk("%s: maybe address at BIOS 0x%x:",
			       dev->name, 0xe8000+j);
			for (i = 0; i < 6; i++) {
				eth_addr[i] = bios[i+j];
				printk(" %2.2X", eth_addr[i]);
			}
			printk("\n");
		}
	}

	printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
	       dev->name, dev->base_addr, dev->irq);
	for (i = 0; i < 6; i++)
		printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
	printk("\n");

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = 5*HZ;

#if 0
	i596_port_do(dev, PORT_SELFTEST, "selftest");
	i596_port_do(dev, PORT_DUMP, "dump");
#endif
	return 0;

err_out_kfree:
	release_region(IOADDR, LP486E_TOTAL_SIZE);
	return ret;
}

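/*
 * Reap completed commands from the CU list: account transmit results,
 * free the associated buffers, and ask for a CU restart if more
 * commands are still pending.
 */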
static inline void
i596_handle_CU_completion(struct net_device *dev,
			  struct i596_private *lp,
			  unsigned short status,
			  unsigned short *ack_cmdp) {
	struct i596_cmd *cmd;
	int frames_out = 0;
	int commands_done = 0;
	int cmd_val;
	unsigned long flags;

	spin_lock_irqsave(&lp->cmd_lock, flags);
	cmd = lp->cmd_head;

	while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
		cmd = lp->cmd_head;

		lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
		lp->cmd_backlog--;

		commands_done++;
		cmd_val = cmd->command & 0x7;
#if 0
		printk("finished CU %s command (%d)\n",
		       CUcmdnames[cmd_val], cmd_val);
#endif
		switch (cmd_val) {
		case CmdTx:
		{
			struct tx_cmd *tx_cmd;
			struct i596_tbd *tx_cmd_tbd;

			tx_cmd = (struct tx_cmd *) cmd;
			tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);

			frames_out++;
			if (cmd->status & CMD_STAT_OK) {
				if (i596_debug)
					print_eth(pa_to_va(tx_cmd_tbd->pa_data));
			} else {
				dev->stats.tx_errors++;
				if (i596_debug)
					printk("transmission failure:%04x\n",
					       cmd->status);
				if (cmd->status & 0x0020)
					dev->stats.collisions++;
				if (!(cmd->status & 0x0040))
					dev->stats.tx_heartbeat_errors++;
				if (cmd->status & 0x0400)
					dev->stats.tx_carrier_errors++;
				if (cmd->status & 0x0800)
					dev->stats.collisions++;
				if (cmd->status & 0x1000)
					dev->stats.tx_aborted_errors++;
			}
			dev_kfree_skb_irq(tx_cmd_tbd->skb);

			cmd->pa_next = I596_NULL;
			kfree((unsigned char *)tx_cmd);
			netif_wake_queue(dev);
			break;
		}

		case CmdMulticastList:
			cmd->pa_next = I596_NULL;
			kfree((unsigned char *)cmd);
			break;

		case CmdTDR:
		{
			unsigned long status = *((unsigned long *) (cmd + 1));
			if (status & 0x8000) {
				if (i596_debug)
					printk("%s: link ok.\n", dev->name);
			} else {
				if (status & 0x4000)
					printk("%s: Transceiver problem.\n",
					       dev->name);
				if (status & 0x2000)
					printk("%s: Termination problem.\n",
					       dev->name);
				if (status & 0x1000)
					printk("%s: Short circuit.\n",
					       dev->name);
				printk("%s: Time %ld.\n",
				       dev->name, status & 0x07ff);
			}
		}
		default:
			cmd->pa_next = I596_NULL;
			lp->last_cmd = jiffies;

		}
		barrier();
	}

	cmd = lp->cmd_head;
	while (cmd && (cmd != lp->cmd_tail)) {
		cmd->command &= 0x1fff;
		cmd = pa_to_va(cmd->pa_next);
		barrier();
	}

	if (lp->cmd_head)
		*ack_cmdp |= CUC_START;
	lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
	spin_unlock_irqrestore(&lp->cmd_lock, flags);
}

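/*
 * Interrupt handler: read the SCB status, acknowledge it, service CU
 * completions and received frames, then signal the chip via CA.
 */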
static irqreturn_t
i596_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct i596_private *lp = netdev_priv(dev);
	unsigned short status, ack_cmd = 0;
	int frames_in = 0;

	if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
		;

	status = lp->scb.status;
#if 0
	if (i596_debug) {
		printk("%s: i596 interrupt, ", dev->name);
		i596_out_status(status);
	}
#endif

	if (status == 0xffff) {
		printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
		goto out;
	}

	ack_cmd = (status & STAT_ACK);

	if (status & (STAT_CX | STAT_CNA))
		i596_handle_CU_completion(dev, lp, status, &ack_cmd);

	if (status & (STAT_FR | STAT_RNR)) {

		if ((status & STAT_RNR) && netif_running(dev))
			ack_cmd |= RX_START;

		if (status & STAT_FR) {
			frames_in = i596_rx(dev);
			if (!frames_in)
				printk("receive frame reported, but no frames\n");
		}
	}

	if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
		;

	lp->scb.command = ack_cmd;

	CLEAR_INT();
	CA();

out:
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev) {
	struct i596_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	if (i596_debug)
		printk("%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status);

	lp->scb.command = (CUC_ABORT | RX_ABORT);
	CA();

	i596_cleanup_cmd(dev);

	if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
		;

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

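/*
 * Update receive filtering: either queue a CmdMulticastList carrying the
 * current multicast addresses, or re-issue the Configure command with
 * the promiscuous bit in the config block updated.
 */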
static void set_multicast_list(struct net_device *dev) {
	struct i596_private *lp = netdev_priv(dev);
	struct i596_cmd *cmd;

	if (i596_debug > 1)
		printk("%s: set multicast list %d\n",
		       dev->name, netdev_mc_count(dev));

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		char *cp;
		cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
			      netdev_mc_count(dev) * 6, GFP_ATOMIC);
		if (cmd == NULL) {
			printk(KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
			return;
		}
		cmd->command = CmdMulticastList;
		*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
		cp = ((char *)(cmd + 1)) + 2;
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(cp, ha->addr, 6);
			cp += 6;
		}
		if (i596_debug & LOG_SRCDST)
			print_eth(((char *)(cmd + 1)) + 2);
		i596_add_cmd(dev, cmd);
	} else {
		if (lp->set_conf.pa_next != I596_NULL) {
			return;
		}
		if (netdev_mc_empty(dev) &&
		    !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
			lp->i596_config[8] &= ~0x01;
		} else {
			lp->i596_config[8] |= 0x01;
		}

		i596_add_cmd(dev, &lp->set_conf);
	}
}

MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
MODULE_LICENSE("GPL");

static struct net_device *dev_lp486e;
static int full_duplex;
static int options;
static int io = IOADDR;
static int irq = IRQ;

module_param(debug, int, 0);
module_param(options, int, 0);
module_param(full_duplex, int, 0);

static int __init lp486e_init_module(void) {
	int err;
	struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
	if (!dev)
		return -ENOMEM;

	dev->irq = irq;
	dev->base_addr = io;
	err = lp486e_probe(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	err = register_netdev(dev);
	if (err) {
		release_region(dev->base_addr, LP486E_TOTAL_SIZE);
		free_netdev(dev);
		return err;
	}
	dev_lp486e = dev;
	full_duplex = 0;
	options = 0;
	return 0;
}

static void __exit lp486e_cleanup_module(void) {
	unregister_netdev(dev_lp486e);
	release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
	free_netdev(dev_lp486e);
}

module_init(lp486e_init_module);
module_exit(lp486e_cleanup_module);