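/*
 *  Amiga Ariadne Ethernet Driver
 *
 *  Driver for the Village Tronic Ariadne Zorro II Ethernet board, based on
 *  the AMD Am79C960 PCnet-ISA controller. The descriptor rings and packet
 *  buffers live in the board's on-board RAM.
 */
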
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/irq.h>

#include "ariadne.h"

#ifdef ARIADNE_DEBUG
int ariadne_debug = ARIADNE_DEBUG;
#else
int ariadne_debug = 1;
#endif

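/*
 * Byte-order helpers: the big-endian m68k CPU sees the little-endian
 * Am79C960 registers and descriptors with their bytes swapped, so these
 * macros swap bytes within 16-bit words and within the halves of 32-bit
 * values.
 */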
/* Swap the bytes in a word */
#define swapw(x)	((((x) >> 8) & 0x00ff) | (((x) << 8) & 0xff00))
/* Low byte of a word */
#define lowb(x)		((x) & 0xff)
/* Byte-swapped high word of a long */
#define swhighw(x)	((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
/* Byte-swapped low word of a long */
#define swloww(x)	((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))

/* Transmit/receive ring sizes and per-buffer size in bytes */
#define TX_RING_SIZE	5
#define RX_RING_SIZE	16

#define PKT_BUF_SIZE	1520

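/*
 * Per-device private data: cached pointers into the descriptor rings and
 * packet buffers in board RAM, plus ring bookkeeping indices.
 */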
struct ariadne_private {
	volatile struct TDRE *tx_ring[TX_RING_SIZE];
	volatile struct RDRE *rx_ring[RX_RING_SIZE];
	volatile u_short *tx_buff[TX_RING_SIZE];
	volatile u_short *rx_buff[RX_RING_SIZE];
	int cur_tx, cur_rx;	/* next Tx entry to fill / next Rx entry to check */
	int dirty_tx;		/* oldest Tx entry not yet reclaimed */
	char tx_full;
};

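/*
 * Layout of the descriptor rings and packet buffers in the board's shared
 * RAM (mapped at ARIADNE_RAM).
 */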
struct lancedata {
	struct TDRE tx_ring[TX_RING_SIZE];
	struct RDRE rx_ring[RX_RING_SIZE];
	u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
	u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
};

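/*
 * Copy a packet into the board buffer RAM one 16-bit word at a time;
 * an odd trailing byte ends up in the high byte of the last word.
 */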
static void memcpyw(volatile u_short *dest, u_short *src, int len)
{
	while (len >= 2) {
		*(dest++) = *(src++);
		len -= 2;
	}
	if (len == 1)
		*dest = (*(u_char *)src) << 8;
}

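/* Initialise the Tx and Rx descriptor rings in board RAM */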
static void ariadne_init_ring(struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
	int i;

	netif_stop_queue(dev);

	priv->tx_full = 0;
	priv->cur_rx = priv->cur_tx = 0;
	priv->dirty_tx = 0;

	/* Set up the Tx ring entries */
	for (i = 0; i < TX_RING_SIZE; i++) {
		volatile struct TDRE *t = &lancedata->tx_ring[i];
		t->TMD0 = swloww(ARIADNE_RAM +
				 offsetof(struct lancedata, tx_buff[i]));
		t->TMD1 = swhighw(ARIADNE_RAM +
				  offsetof(struct lancedata, tx_buff[i])) |
			TF_STP | TF_ENP;
		t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
		t->TMD3 = 0;
		priv->tx_ring[i] = &lancedata->tx_ring[i];
		priv->tx_buff[i] = lancedata->tx_buff[i];
		netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
			   i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
	}

	/* Set up the Rx ring entries and hand their buffers to the chip */
	for (i = 0; i < RX_RING_SIZE; i++) {
		volatile struct RDRE *r = &lancedata->rx_ring[i];
		r->RMD0 = swloww(ARIADNE_RAM +
				 offsetof(struct lancedata, rx_buff[i]));
		r->RMD1 = swhighw(ARIADNE_RAM +
				  offsetof(struct lancedata, rx_buff[i])) |
			RF_OWN;
		r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
		r->RMD3 = 0x0000;
		priv->rx_ring[i] = &lancedata->rx_ring[i];
		priv->rx_buff[i] = lancedata->rx_buff[i];
		netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
			   i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
	}
}

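/* Pass any completed receive buffers up the stack and recycle them */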
static int ariadne_rx(struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	int entry = priv->cur_rx % RX_RING_SIZE;
	int i;

	/* As long as we own the next entry, it holds a received packet */
	while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
		int status = lowb(priv->rx_ring[entry]->RMD1);

		if (status != (RF_STP | RF_ENP)) {
			/* A receive error: a jabber packet may span more than
			 * one buffer, and only the descriptor holding the end
			 * of the packet reports the error, so only count a
			 * general error when RF_ENP is set.
			 */
			if (status & RF_ENP)
				dev->stats.rx_errors++;
			if (status & RF_FRAM)
				dev->stats.rx_frame_errors++;
			if (status & RF_OFLO)
				dev->stats.rx_over_errors++;
			if (status & RF_CRC)
				dev->stats.rx_crc_errors++;
			if (status & RF_BUFF)
				dev->stats.rx_fifo_errors++;
			priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
		} else {
			/* A good packet: copy it out of board RAM into a fresh skb */
			short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				/* No skb available: if nearly every ring entry
				 * already holds an undelivered packet, drop
				 * this one and return its buffer to the chip.
				 */
				for (i = 0; i < RX_RING_SIZE; i++)
					if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
						break;

				if (i > RX_RING_SIZE - 2) {
					dev->stats.rx_dropped++;
					priv->rx_ring[entry]->RMD1 |= RF_OWN;
					priv->cur_rx++;
				}
				break;
			}

			skb_reserve(skb, 2);	/* align the IP header */
			skb_put(skb, pkt_len);
			skb_copy_to_linear_data(skb,
						(const void *)priv->rx_buff[entry],
						pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
				   ((u_short *)skb->data)[6],
				   skb->data + 6, skb->data,
				   skb->data, skb->len);

			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}

		/* Return the buffer to the chip and move on */
		priv->rx_ring[entry]->RMD1 |= RF_OWN;
		entry = (++priv->cur_rx) % RX_RING_SIZE;
	}

	priv->cur_rx = priv->cur_rx % RX_RING_SIZE;

	return 0;
}

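/* Interrupt handler: acknowledge and service Rx, Tx-done and error events */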
static irqreturn_t ariadne_interrupt(int irq, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	struct ariadne_private *priv;
	int csr0, boguscnt;
	int handled = 0;

	lance->RAP = CSR0;	/* Controller Status */

	if (!(lance->RDP & INTR))	/* not our interrupt */
		return IRQ_NONE;

	priv = netdev_priv(dev);

	boguscnt = 10;
	while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources */
		lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);

#ifdef DEBUG
		if (ariadne_debug > 5) {
			netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
				   csr0, lance->RDP);
			if (csr0 & INTR)
				pr_cont(" INTR");
			if (csr0 & INEA)
				pr_cont(" INEA");
			if (csr0 & RXON)
				pr_cont(" RXON");
			if (csr0 & TXON)
				pr_cont(" TXON");
			if (csr0 & TDMD)
				pr_cont(" TDMD");
			if (csr0 & STOP)
				pr_cont(" STOP");
			if (csr0 & STRT)
				pr_cont(" STRT");
			if (csr0 & INIT)
				pr_cont(" INIT");
			if (csr0 & ERR)
				pr_cont(" ERR");
			if (csr0 & BABL)
				pr_cont(" BABL");
			if (csr0 & CERR)
				pr_cont(" CERR");
			if (csr0 & MISS)
				pr_cont(" MISS");
			if (csr0 & MERR)
				pr_cont(" MERR");
			if (csr0 & RINT)
				pr_cont(" RINT");
			if (csr0 & TINT)
				pr_cont(" TINT");
			if (csr0 & IDON)
				pr_cont(" IDON");
			pr_cont(" ]\n");
		}
#endif

		if (csr0 & RINT) {	/* packet(s) received */
			handled = 1;
			ariadne_rx(dev);
		}

		if (csr0 & TINT) {	/* transmit complete: reclaim Tx entries */
			int dirty_tx = priv->dirty_tx;

			handled = 1;
			while (dirty_tx < priv->cur_tx) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = lowb(priv->tx_ring[entry]->TMD1);

				if (status & TF_OWN)
					break;	/* the chip still owns this entry */

				priv->tx_ring[entry]->TMD1 &= 0xff00;

				if (status & TF_ERR) {
					/* A transmit error: details are in TMD3 */
					int err_status = priv->tx_ring[entry]->TMD3;
					dev->stats.tx_errors++;
					if (err_status & EF_RTRY)
						dev->stats.tx_aborted_errors++;
					if (err_status & EF_LCAR)
						dev->stats.tx_carrier_errors++;
					if (err_status & EF_LCOL)
						dev->stats.tx_window_errors++;
					if (err_status & EF_UFLO) {
						/* FIFO underflow stops the transmitter */
						dev->stats.tx_fifo_errors++;
						netdev_err(dev, "Tx FIFO error! Status %04x\n",
							   csr0);
						/* Restart the chip */
						lance->RDP = STRT;
					}
				} else {
					if (status & (TF_MORE | TF_ONE))
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
				netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
					   dirty_tx, priv->cur_tx,
					   priv->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (priv->tx_full && netif_queue_stopped(dev) &&
			    dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
				/* The ring is no longer full */
				priv->tx_full = 0;
				netif_wake_queue(dev);
			}

			priv->dirty_tx = dirty_tx;
		}

		/* Log miscellaneous error conditions */
		if (csr0 & BABL) {
			handled = 1;
			dev->stats.tx_errors++;	/* transmitter babbled */
		}
		if (csr0 & MISS) {
			handled = 1;
			dev->stats.rx_errors++;	/* missed an Rx frame */
		}
		if (csr0 & MERR) {
			handled = 1;
			netdev_err(dev, "Bus master arbitration failure, status %04x\n",
				   csr0);
			/* Restart the chip */
			lance->RDP = STRT;
		}
	}

	/* Clear any remaining latched conditions and re-enable interrupts */
	lance->RAP = CSR0;
	lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;

	if (ariadne_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
			   lance->RAP, lance->RDP);

	return IRQ_RETVAL(handled);
}

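/* Open: probe the chip, set up rings and CSRs, then start the controller */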
static int ariadne_open(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	u_short in;
	u_long version;
	int i;

	/* A read of the reset register resets the LANCE */
	in = lance->Reset;

	/* Stop the LANCE */
	lance->RAP = CSR0;	/* Controller Status */
	lance->RDP = STOP;

	/* Check the chip version */
	lance->RAP = CSR88;	/* Chip ID */
	version = swapw(lance->RDP);
	lance->RAP = CSR89;	/* Chip ID */
	version |= swapw(lance->RDP) << 16;
	if ((version & 0x00000fff) != 0x00000003) {
		pr_warn("Couldn't find AMD Ethernet Chip\n");
		return -EAGAIN;
	}
	if ((version & 0x0ffff000) != 0x00003000) {
		pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
			(version & 0x0ffff000) >> 12);
		return -EAGAIN;
	}

	netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
		   (version & 0xf0000000) >> 28);

	ariadne_init_ring(dev);

	/* Interrupt masks and feature control */
	lance->RAP = CSR3;
	lance->RDP = 0x0000;
	lance->RAP = CSR4;
	lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;

	/* Clear the logical address (multicast) filter */
	lance->RAP = CSR8;
	lance->RDP = 0x0000;
	lance->RAP = CSR9;
	lance->RDP = 0x0000;
	lance->RAP = CSR10;
	lance->RDP = 0x0000;
	lance->RAP = CSR11;
	lance->RDP = 0x0000;

	/* Set the Ethernet hardware address */
	lance->RAP = CSR12;
	lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
	lance->RAP = CSR13;
	lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
	lance->RAP = CSR14;
	lance->RDP = ((u_short *)&dev->dev_addr[0])[2];

	/* Mode register: default operation */
	lance->RAP = CSR15;
	lance->RDP = 0x0000;

	/* Transmit descriptor ring base address */
	lance->RAP = CSR30;
	lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
	lance->RAP = CSR31;
	lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));

	/* Receive descriptor ring base address */
	lance->RAP = CSR24;
	lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
	lance->RAP = CSR25;
	lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));

	/* Ring lengths, stored as negative values */
	lance->RAP = CSR76;	/* Receive Ring Length */
	lance->RDP = swapw(((u_short)-RX_RING_SIZE));
	lance->RAP = CSR78;	/* Transmit Ring Length */
	lance->RDP = swapw(((u_short)-TX_RING_SIZE));

	/* Let the chip auto-select the media port (ASEL) */
	lance->RAP = ISACSR2;	/* Miscellaneous Configuration */
	lance->IDP = ASEL;

	/* LED control */
	lance->RAP = ISACSR5;	/* LED1 Status */
	lance->IDP = PSE | XMTE;
	lance->RAP = ISACSR6;	/* LED2 Status */
	lance->IDP = PSE | COLE;
	lance->RAP = ISACSR7;	/* LED3 Status */
	lance->IDP = PSE | RCVE;

	netif_start_queue(dev);

	i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
			dev->name, dev);
	if (i)
		return i;

	/* Start the chip and enable interrupts */
	lance->RAP = CSR0;
	lance->RDP = INEA | STRT;

	return 0;
}

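/* Close: stop the chip, record missed frames and free the IRQ */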
static int ariadne_close(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	netif_stop_queue(dev);

	lance->RAP = CSR112;	/* Missed Frame Count */
	dev->stats.rx_missed_errors = swapw(lance->RDP);
	lance->RAP = CSR0;	/* Controller Status */

	if (ariadne_debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
			   lance->RDP);
		netdev_dbg(dev, "%lu packets missed\n",
			   dev->stats.rx_missed_errors);
	}

	/* Stop the LANCE */
	lance->RDP = STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);

	return 0;
}

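/* Stop the chip, reinitialise the rings and restart it */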
static inline void ariadne_reset(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	lance->RAP = CSR0;
	lance->RDP = STOP;
	ariadne_init_ring(dev);
	lance->RDP = INEA | STRT;
	netif_start_queue(dev);
}

static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	netdev_err(dev, "transmit timed out, status %04x, resetting\n",
		   lance->RDP);
	ariadne_reset(dev);
	netif_wake_queue(dev);
}

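/* Queue a packet for transmission: copy it into board RAM and kick the chip */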
static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	int entry;
	unsigned long flags;
	int len = skb->len;

#if 0
	if (ariadne_debug > 3) {
		lance->RAP = CSR0;
		netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
		lance->RDP = 0x0000;
	}
#endif

	/* Pad short frames to the minimum Ethernet length */
	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		len = ETH_ZLEN;
	}

	/* Fill in a Tx ring entry */

	netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
		   ((u_short *)skb->data)[6],
		   skb->data + 6, skb->data,
		   skb->data, skb->len);

	local_irq_save(flags);

	entry = priv->cur_tx % TX_RING_SIZE;

	/* Write TMD1 (which carries the ownership bit) last, after the
	 * buffer has been copied into board RAM.
	 */
	priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
	priv->tx_ring[entry]->TMD3 = 0x0000;
	memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);

#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
		       (void *)priv->tx_buff[entry],
		       skb->len > 64 ? 64 : skb->len, true);
#endif

	priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
		| TF_OWN | TF_STP | TF_ENP;

	dev_kfree_skb(skb);

	priv->cur_tx++;
	if ((priv->cur_tx >= TX_RING_SIZE) &&
	    (priv->dirty_tx >= TX_RING_SIZE)) {

		netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
			   priv->cur_tx, priv->dirty_tx);

		priv->cur_tx -= TX_RING_SIZE;
		priv->dirty_tx -= TX_RING_SIZE;
	}
	dev->stats.tx_bytes += len;

	/* Trigger an immediate send poll */
	lance->RAP = CSR0;
	lance->RDP = INEA | TDMD;

	/* If the next descriptor is still in use, the ring is full */
	if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
		netif_stop_queue(dev);
		priv->tx_full = 1;
	}
	local_irq_restore(flags);

	return NETDEV_TX_OK;
}

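/* Update and return interface statistics, reading the missed-frame counter */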
static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	short saved_addr;
	unsigned long flags;

	local_irq_save(flags);
	saved_addr = lance->RAP;
	lance->RAP = CSR112;	/* Missed Frame Count */
	dev->stats.rx_missed_errors = swapw(lance->RDP);
	lance->RAP = saved_addr;
	local_irq_restore(flags);

	return &dev->stats;
}

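/*
 * Set or clear the multicast filter. The chip must be stopped to change the
 * mode register, so the rings are reinitialised and the chip restarted.
 */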
static void set_multicast_list(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	if (!netif_running(dev))
		return;

	netif_stop_queue(dev);

	/* Temporarily stop the chip and rebuild the rings */
	lance->RAP = CSR0;
	lance->RDP = STOP;
	ariadne_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		lance->RAP = CSR15;	/* Mode Register */
		lance->RDP = PROM;	/* set promiscuous mode */
	} else {
		short multicast_table[4];
		int num_addrs = netdev_mc_count(dev);
		int i;

		/* Rather than computing the hash filter, accept all
		 * multicasts when any multicast address is requested.
		 */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1,
		       sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			lance->RAP = CSR8 + (i << 8);	/* Logical Address Filter */
			lance->RDP = swapw(multicast_table[i]);
		}
		lance->RAP = CSR15;	/* Mode Register */
		lance->RDP = 0x0000;	/* clear promiscuous mode */
	}

	/* Restart the chip and re-enable interrupts */
	lance->RAP = CSR0;
	lance->RDP = INEA | STRT | IDON;

	netif_wake_queue(dev);
}

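/* Tear down a board: unregister the interface and release its regions */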
static void ariadne_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
	release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
	free_netdev(dev);
}

static const struct zorro_device_id ariadne_zorro_tbl[] = {
	{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);

static const struct net_device_ops ariadne_netdev_ops = {
	.ndo_open		= ariadne_open,
	.ndo_stop		= ariadne_close,
	.ndo_start_xmit		= ariadne_start_xmit,
	.ndo_tx_timeout		= ariadne_tx_timeout,
	.ndo_get_stats		= ariadne_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

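/*
 * Probe a Zorro board: claim the register and RAM regions, derive the MAC
 * address from the board's serial number and register the net device.
 */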
static int ariadne_init_one(struct zorro_dev *z,
			    const struct zorro_device_id *ent)
{
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + ARIADNE_LANCE;
	unsigned long mem_start = board + ARIADNE_RAM;
	struct resource *r1, *r2;
	struct net_device *dev;
	u32 serial;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct ariadne_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
		return -ENOMEM;
	}

	r1->name = dev->name;
	r2->name = dev->name;

	serial = be32_to_cpu(z->rom.er_SerialNumber);
	dev->dev_addr[0] = 0x00;
	dev->dev_addr[1] = 0x60;
	dev->dev_addr[2] = 0x30;
	dev->dev_addr[3] = (serial >> 16) & 0xff;
	dev->dev_addr[4] = (serial >> 8) & 0xff;
	dev->dev_addr[5] = serial & 0xff;
	dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
	dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;

	dev->netdev_ops = &ariadne_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}

static struct zorro_driver ariadne_driver = {
	.name		= "ariadne",
	.id_table	= ariadne_zorro_tbl,
	.probe		= ariadne_init_one,
	.remove		= ariadne_remove_one,
};

static int __init ariadne_init_module(void)
{
	return zorro_register_driver(&ariadne_driver);
}

static void __exit ariadne_cleanup_module(void)
{
	zorro_unregister_driver(&ariadne_driver);
}

module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);

MODULE_LICENSE("GPL");