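/*
 * cs89x0.c: driver for the Cirrus Logic (Crystal Semiconductor) CS8900 and
 * CS8920 10 Mbps Ethernet controllers, attached either on the ISA bus or as
 * a memory-mapped platform device (CONFIG_CS89x0_PLATFORM).
 *
 * Written by Russell Nelson <nelson@crynwr.com>, with later work by
 * Andrew Morton and others; see the version string below.
 */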
#ifndef CONFIG_ISA_DMA_API
#define ALLOW_DMA 0
#else
#define ALLOW_DMA 1
#endif

#define DEBUGGING 1

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA
#include <asm/dma.h>
#endif

#include "cs89x0.h"

#define cs89_dbg(val, level, fmt, ...)			\
do {							\
	if (val <= net_debug)				\
		pr_##level(fmt, ##__VA_ARGS__);		\
} while (0)

static char version[] __initdata =
	"v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton";

#define DRV_NAME "cs89x0"

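/* Candidate ISA I/O ports to probe when none is specified, and the IRQ lines
 * a CS8900 can use (the chip's ISA interrupt select field is an index into
 * cs8900_irq_map).
 */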
#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata = {
	0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
	0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
};
static unsigned int cs8900_irq_map[] = {
	10, 11, 12, 5
};
#endif

#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
#else
#define net_debug 0
#endif

#define NETCARD_IO_EXTENT 16

#define FORCE_RJ45 0x0001
#define FORCE_AUI  0x0002
#define FORCE_BNC  0x0004

#define FORCE_AUTO 0x0010
#define FORCE_HALF 0x0020
#define FORCE_FULL 0x0030

struct net_local {
	int chip_type;
	char chip_revision;
	int send_cmd;
	int auto_neg_cnf;
	int adapter_cnf;
	int isa_config;
	int irq_map;
	int rx_mode;
	int curr_rx_cfg;
	int linectl;
	int send_underrun;
	int force;
	spinlock_t lock;
	void __iomem *virt_addr;
	unsigned long size;
#if ALLOW_DMA
	int use_dma;
	int dma;
	int dmasize;
	unsigned char *dma_buff;
	unsigned char *end_dma_buff;
	unsigned char *rx_dma_ptr;
#endif
};

#define tx_done(dev) 1

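/* Kernel command-line options for non-modular builds, e.g. "cs89x0_dma=5" or
 * "cs89x0_media=rj45" (rj45, aui or bnc), parsed by the __setup() handlers
 * below.
 */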
#if !defined(MODULE)
#if ALLOW_DMA
static int g_cs89x0_dma;

static int __init dma_fn(char *str)
{
	g_cs89x0_dma = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("cs89x0_dma=", dma_fn);
#endif

static int g_cs89x0_media__force;

static int __init media_fn(char *str)
{
	if (!strcmp(str, "rj45"))
		g_cs89x0_media__force = FORCE_RJ45;
	else if (!strcmp(str, "aui"))
		g_cs89x0_media__force = FORCE_AUI;
	else if (!strcmp(str, "bnc"))
		g_cs89x0_media__force = FORCE_BNC;

	return 1;
}

__setup("cs89x0_media=", media_fn);
#endif

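/* Low-level accessors: readreg()/writereg() select a PacketPage register via
 * ADD_PORT and move data through DATA_PORT, while readwords()/writewords()
 * stream 16-bit words to and from the RX/TX frame ports.
 */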
static void readwords(struct net_local *lp, int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = ioread16(lp->virt_addr + portno);
		*buf8++ = (u8)tmp16;
		*buf8++ = (u8)(tmp16 >> 8);
	} while (--length);
}

static void writewords(struct net_local *lp, int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = *buf8++;
		tmp16 |= (*buf8++) << 8;
		iowrite16(tmp16, lp->virt_addr + portno);
	} while (--length);
}

static u16
readreg(struct net_device *dev, u16 regno)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	return ioread16(lp->virt_addr + DATA_PORT);
}

static void
writereg(struct net_device *dev, u16 regno, u16 value)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	iowrite16(value, lp->virt_addr + DATA_PORT);
}

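/* EEPROM helpers: wait for the serial EEPROM interface to go idle, then read
 * configuration words and verify their 16-bit checksum.
 */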
static int __init
wait_eeprom_ready(struct net_device *dev)
{
	int timeout = jiffies;

	while (readreg(dev, PP_SelfST) & SI_BUSY)
		if (jiffies - timeout >= 40)
			return -1;
	return 0;
}

static int __init
get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
{
	int i;

	cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len);
	for (i = 0; i < len; i++) {
		if (wait_eeprom_ready(dev) < 0)
			return -1;

		writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
		if (wait_eeprom_ready(dev) < 0)
			return -1;
		buffer[i] = readreg(dev, PP_EEData);
		cs89_dbg(3, cont, " %04x", buffer[i]);
	}
	cs89_dbg(3, cont, "\n");
	return 0;
}

static int __init
get_eeprom_cksum(int off, int len, int *buffer)
{
	int i, cksum;

	cksum = 0;
	for (i = 0; i < len; i++)
		cksum += buffer[i];
	cksum &= 0xffff;
	if (cksum == 0)
		return 0;
	return -1;
}

static void
write_irq(struct net_device *dev, int chip_type, int irq)
{
	int i;

	if (chip_type == CS8900) {
#ifndef CONFIG_CS89x0_PLATFORM
		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
			if (cs8900_irq_map[i] == irq)
				break;

		if (i == ARRAY_SIZE(cs8900_irq_map))
			i = 3;
#else
		i = 0;
#endif
		writereg(dev, PP_CS8900_ISAINT, i);
	} else {
		writereg(dev, PP_CS8920_ISAINT, irq);
	}
}

static void
count_rx_errors(int status, struct net_device *dev)
{
	dev->stats.rx_errors++;
	if (status & RX_RUNT)
		dev->stats.rx_length_errors++;
	if (status & RX_EXTRA_DATA)
		dev->stats.rx_length_errors++;
	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT)))
		dev->stats.rx_crc_errors++;
	if (status & RX_DRIBBLE)
		dev->stats.rx_frame_errors++;
}

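/* ISA DMA receive support: the chip can copy incoming frames into a ring
 * buffer in host memory; these helpers pick a DMA channel, program the chip's
 * DMA registers and pull completed frames out of the ring.
 */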
#if ALLOW_DMA

#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17)

static void
get_dma_channel(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->dma) {
		dev->dma = lp->dma;
		lp->isa_config |= ISA_RxDMA;
	} else {
		if ((lp->isa_config & ANY_ISA_DMA) == 0)
			return;
		dev->dma = lp->isa_config & DMA_NO_MASK;
		if (lp->chip_type == CS8900)
			dev->dma += 5;
		if (dev->dma < 5 || dev->dma > 7) {
			lp->isa_config &= ~ANY_ISA_DMA;
			return;
		}
	}
}

static void
write_dma(struct net_device *dev, int chip_type, int dma)
{
	struct net_local *lp = netdev_priv(dev);

	if ((lp->isa_config & ANY_ISA_DMA) == 0)
		return;
	if (chip_type == CS8900)
		writereg(dev, PP_CS8900_ISADMA, dma - 5);
	else
		writereg(dev, PP_CS8920_ISADMA, dma);
}

static void
set_dma_cfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma) {
		if ((lp->isa_config & ANY_ISA_DMA) == 0) {
			cs89_dbg(3, err, "set_dma_cfg(): no DMA\n");
			return;
		}
		if (lp->isa_config & ISA_RxDMA) {
			lp->curr_rx_cfg |= RX_DMA_ONLY;
			cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n");
		} else {
			lp->curr_rx_cfg |= AUTO_RX_DMA;
			cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n");
		}
	}
}

static int
dma_bufcfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma)
		return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0;
	else
		return 0;
}

static int
dma_busctl(struct net_device *dev)
{
	int retval = 0;
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma) {
		if (lp->isa_config & ANY_ISA_DMA)
			retval |= RESET_RX_DMA;
		if (lp->isa_config & DMA_BURST)
			retval |= DMA_BURST_MODE;
		if (lp->dmasize == 64)
			retval |= RX_DMA_SIZE_64K;
		retval |= MEMORY_ON;
	}
	return retval;
}

static void
dma_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;
	unsigned char *bp = lp->rx_dma_ptr;

	status = bp[0] + (bp[1] << 8);
	length = bp[2] + (bp[3] << 8);
	bp += 4;

	cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n",
		 dev->name, (unsigned long)bp, status, length);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		goto skip_this_frame;
	}

	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;

skip_this_frame:
		bp += (length + 3) & ~3;
		if (bp >= lp->end_dma_buff)
			bp -= lp->dmasize * 1024;
		lp->rx_dma_ptr = bp;
		return;
	}
	skb_reserve(skb, 2);

	if (bp + length > lp->end_dma_buff) {
		int semi_cnt = lp->end_dma_buff - bp;

		memcpy(skb_put(skb, semi_cnt), bp, semi_cnt);
		memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff,
		       length - semi_cnt);
	} else {
		memcpy(skb_put(skb, length), bp, length);
	}
	bp += (length + 3) & ~3;
	if (bp >= lp->end_dma_buff)
		bp -= lp->dmasize * 1024;
	lp->rx_dma_ptr = bp;

	cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n",
		 dev->name, length,
		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		  skb->data[ETH_ALEN + ETH_ALEN + 1]));

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}

static void release_dma_buff(struct net_local *lp)
{
	if (lp->dma_buff) {
		free_pages((unsigned long)(lp->dma_buff),
			   get_order(lp->dmasize * 1024));
		lp->dma_buff = NULL;
	}
}

#endif	/* ALLOW_DMA */

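/* Switch the DC/DC converter that powers the 10Base-2 transceiver by driving
 * HCB1, honouring the polarity recorded in adapter_cnf, then busy-wait about
 * one second (HZ jiffies) for it to settle.
 */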
static void
control_dc_dc(struct net_device *dev, int on_not_off)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned int selfcontrol;
	int timenow = jiffies;

	selfcontrol = HCB1_ENBL;
	if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
		selfcontrol |= HCB1;
	else
		selfcontrol &= ~HCB1;
	writereg(dev, PP_SelfCTL, selfcontrol);

	while (jiffies - timenow < HZ)
		;
}

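/* Transmit a minimal self-addressed test frame and report whether the chip
 * signalled TX_OK; used to check whether the AUI or BNC port has a usable
 * cable attached.
 */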
static int
send_test_pkt(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	char test_packet[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 46,
		0, 0,
		0xf3, 0
	};
	long timenow = jiffies;

	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);

	memcpy(test_packet, dev->dev_addr, ETH_ALEN);
	memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN);

	iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT);
	iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);

	while (jiffies - timenow < 5)
		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
			break;
	if (jiffies - timenow >= 5)
		return 0;

	writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);

	cs89_dbg(1, debug, "Sending test packet ");

	for (timenow = jiffies; jiffies - timenow < 3;)
		;
	if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
		cs89_dbg(1, cont, "succeeded\n");
		return 1;
	}
	cs89_dbg(1, cont, "failed\n");
	return 0;
}

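/* Results of the media detection helpers below. */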
#define DETECTED_NONE  0
#define DETECTED_RJ45H 1
#define DETECTED_RJ45F 2
#define DETECTED_AUI   3
#define DETECTED_BNC   4

static int
detect_tp(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int timenow = jiffies;
	int fdx;

	cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);

	writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY);
	control_dc_dc(dev, 0);

	for (timenow = jiffies; jiffies - timenow < 15;)
		;
	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
		return DETECTED_NONE;

	if (lp->chip_type == CS8900) {
		switch (lp->force & 0xf0) {
#if 0
		case FORCE_AUTO:
			pr_info("%s: cs8900 doesn't autonegotiate\n",
				dev->name);
			return DETECTED_NONE;
#endif
		case FORCE_AUTO:
			lp->force &= ~FORCE_AUTO;
			lp->force |= FORCE_HALF;
			break;
		case FORCE_HALF:
			break;
		case FORCE_FULL:
			writereg(dev, PP_TestCTL,
				 readreg(dev, PP_TestCTL) | FDX_8900);
			break;
		}
		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
	} else {
		switch (lp->force & 0xf0) {
		case FORCE_AUTO:
			lp->auto_neg_cnf = AUTO_NEG_ENABLE;
			break;
		case FORCE_HALF:
			lp->auto_neg_cnf = 0;
			break;
		case FORCE_FULL:
			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
			break;
		}

		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);

		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
			pr_info("%s: negotiating duplex...\n", dev->name);
			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
				if (jiffies - timenow > 4000) {
					pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
					break;
				}
			}
		}
		fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
	}
	if (fdx)
		return DETECTED_RJ45F;
	else
		return DETECTED_RJ45H;
}

static int
detect_bnc(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name);
	control_dc_dc(dev, 1);

	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_BNC;
	else
		return DETECTED_NONE;
}

static int
detect_aui(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name);
	control_dc_dc(dev, 0);

	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_AUI;
	else
		return DETECTED_NONE;
}

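/* Programmed-I/O receive: pull the status word, length and frame data out of
 * RX_FRAME_PORT and hand the packet to the stack.
 */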
static void
net_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;

	status = ioread16(lp->virt_addr + RX_FRAME_PORT);
	length = ioread16(lp->virt_addr + RX_FRAME_PORT);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		return;
	}

	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);

	readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
	if (length & 1)
		skb->data[length - 1] = ioread16(lp->virt_addr + RX_FRAME_PORT);

	cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n",
		 dev->name, length,
		 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		 skb->data[ETH_ALEN + ETH_ALEN + 1]);

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}

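/* Interrupt handler: drain the Interrupt Status Queue (ISQ) and dispatch
 * receive, transmit-completion, buffer, RX-miss and collision events until
 * the queue reads back as zero.
 */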
static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int status;
	int handled = 0;

	lp = netdev_priv(dev);

	while ((status = ioread16(lp->virt_addr + ISQ_PORT))) {
		cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status);
		handled = 1;
		switch (status & ISQ_EVENT_MASK) {
		case ISQ_RECEIVER_EVENT:
			net_rx(dev);
			break;
		case ISQ_TRANSMITTER_EVENT:
			dev->stats.tx_packets++;
			netif_wake_queue(dev);
			if ((status & (TX_OK |
				       TX_LOST_CRS |
				       TX_SQE_ERROR |
				       TX_LATE_COL |
				       TX_16_COL)) != TX_OK) {
				if ((status & TX_OK) == 0)
					dev->stats.tx_errors++;
				if (status & TX_LOST_CRS)
					dev->stats.tx_carrier_errors++;
				if (status & TX_SQE_ERROR)
					dev->stats.tx_heartbeat_errors++;
				if (status & TX_LATE_COL)
					dev->stats.tx_window_errors++;
				if (status & TX_16_COL)
					dev->stats.tx_aborted_errors++;
			}
			break;
		case ISQ_BUFFER_EVENT:
			if (status & READY_FOR_TX) {
				netif_wake_queue(dev);
			}
			if (status & TX_UNDERRUN) {
				cs89_dbg(0, err, "%s: transmit underrun\n",
					 dev->name);
				lp->send_underrun++;
				if (lp->send_underrun == 3)
					lp->send_cmd = TX_AFTER_381;
				else if (lp->send_underrun == 6)
					lp->send_cmd = TX_AFTER_ALL;

				netif_wake_queue(dev);
			}
#if ALLOW_DMA
			if (lp->use_dma && (status & RX_DMA)) {
				int count = readreg(dev, PP_DmaFrameCnt);
				while (count) {
					cs89_dbg(5, debug,
						 "%s: receiving %d DMA frames\n",
						 dev->name, count);
					if (count > 1)
						cs89_dbg(2, debug,
							 "%s: receiving %d DMA frames\n",
							 dev->name, count);
					dma_rx(dev);
					if (--count == 0)
						count = readreg(dev, PP_DmaFrameCnt);
					if (count > 0)
						cs89_dbg(2, debug,
							 "%s: continuing with %d DMA frames\n",
							 dev->name, count);
				}
			}
#endif
			break;
		case ISQ_RX_MISS_EVENT:
			dev->stats.rx_missed_errors += (status >> 6);
			break;
		case ISQ_TX_COL_EVENT:
			dev->stats.collisions += (status >> 6);
			break;
		}
	}
	return IRQ_RETVAL(handled);
}

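/* Open/initialize the board: grab the IRQ (probing one from irq_map if none
 * was given) and, if configured, the ISA DMA channel and buffer, program the
 * station address, pick and test the configured media, then enable the
 * receiver, transmitter and interrupts.
 */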
static int
net_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int result = 0;
	int i;
	int ret;

	if (dev->irq < 2) {
#if 0
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
#endif

		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);

		for (i = 2; i < CS8920_NO_INTS; i++) {
			if ((1 << i) & lp->irq_map) {
				if (request_irq(i, net_interrupt, 0, dev->name,
						dev) == 0) {
					dev->irq = i;
					write_irq(dev, lp->chip_type, i);
					break;
				}
			}
		}

		if (i >= CS8920_NO_INTS) {
			writereg(dev, PP_BusCTL, 0);
			pr_err("can't get an interrupt\n");
			ret = -EAGAIN;
			goto bad_out;
		}
	} else {
#if !defined(CONFIG_CS89x0_PLATFORM)
		if (((1 << dev->irq) & lp->irq_map) == 0) {
			pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
			       dev->name, dev->irq, lp->irq_map);
			ret = -EAGAIN;
			goto bad_out;
		}
#endif
		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);

#if 0
		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
#endif
		write_irq(dev, lp->chip_type, dev->irq);
		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
		if (ret) {
			pr_err("request_irq(%d) failed\n", dev->irq);
			goto bad_out;
		}
	}

#if ALLOW_DMA
	if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) {
		unsigned long flags;

		lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
								get_order(lp->dmasize * 1024));
		if (!lp->dma_buff) {
			pr_err("%s: cannot get %dK memory for DMA\n",
			       dev->name, lp->dmasize);
			goto release_irq;
		}
		cs89_dbg(1, debug, "%s: dma %lx %lx\n",
			 dev->name,
			 (unsigned long)lp->dma_buff,
			 (unsigned long)isa_virt_to_bus(lp->dma_buff));
		if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS ||
		    !dma_page_eq(lp->dma_buff,
				 lp->dma_buff + lp->dmasize * 1024 - 1)) {
			pr_err("%s: not usable as DMA buffer\n", dev->name);
			goto release_irq;
		}
		memset(lp->dma_buff, 0, lp->dmasize * 1024);
		if (request_dma(dev->dma, dev->name)) {
			pr_err("%s: cannot get dma channel %d\n",
			       dev->name, dev->dma);
			goto release_irq;
		}
		write_dma(dev, lp->chip_type, dev->dma);
		lp->rx_dma_ptr = lp->dma_buff;
		lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024;
		spin_lock_irqsave(&lp->lock, flags);
		disable_dma(dev->dma);
		clear_dma_ff(dev->dma);
		set_dma_mode(dev->dma, DMA_RX_MODE);
		set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
		set_dma_count(dev->dma, lp->dmasize * 1024);
		enable_dma(dev->dma);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
#endif

	for (i = 0; i < ETH_ALEN / 2; i++)
		writereg(dev, PP_IA + i * 2,
			 (dev->dev_addr[i * 2] |
			  (dev->dev_addr[i * 2 + 1] << 8)));

	writereg(dev, PP_BusCTL, MEMORY_ON);

	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) &&
	    (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
		lp->linectl = LOW_RX_SQUELCH;
	else
		lp->linectl = 0;

	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T:
		result = lp->adapter_cnf & A_CNF_10B_T;
		break;
	case A_CNF_MEDIA_AUI:
		result = lp->adapter_cnf & A_CNF_AUI;
		break;
	case A_CNF_MEDIA_10B_2:
		result = lp->adapter_cnf & A_CNF_10B_2;
		break;
	default:
		result = lp->adapter_cnf & (A_CNF_10B_T |
					    A_CNF_AUI |
					    A_CNF_10B_2);
	}
	if (!result) {
		pr_err("%s: EEPROM is configured for unavailable media\n",
		       dev->name);
release_dma:
#if ALLOW_DMA
		free_dma(dev->dma);
release_irq:
		release_dma_buff(lp);
#endif
		writereg(dev, PP_LineCTL,
			 readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
		free_irq(dev->irq, dev);
		ret = -EAGAIN;
		goto bad_out;
	}

	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
	case A_CNF_MEDIA_10B_T:
		result = detect_tp(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-T (RJ-45) has no cable\n",
				dev->name);
			if (lp->auto_neg_cnf & IMM_BIT)
				result = DETECTED_RJ45H;
		}
		break;
	case A_CNF_MEDIA_AUI:
		result = detect_aui(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
			if (lp->auto_neg_cnf & IMM_BIT)
				result = DETECTED_AUI;
		}
		break;
	case A_CNF_MEDIA_10B_2:
		result = detect_bnc(dev);
		if (result == DETECTED_NONE) {
			pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name);
			if (lp->auto_neg_cnf & IMM_BIT)
				result = DETECTED_BNC;
		}
		break;
	case A_CNF_MEDIA_AUTO:
		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
		if (lp->adapter_cnf & A_CNF_10B_T) {
			result = detect_tp(dev);
			if (result != DETECTED_NONE)
				break;
		}
		if (lp->adapter_cnf & A_CNF_AUI) {
			result = detect_aui(dev);
			if (result != DETECTED_NONE)
				break;
		}
		if (lp->adapter_cnf & A_CNF_10B_2) {
			result = detect_bnc(dev);
			if (result != DETECTED_NONE)
				break;
		}
		pr_err("%s: no media detected\n", dev->name);
		goto release_dma;
	}
	switch (result) {
	case DETECTED_NONE:
		pr_err("%s: no network cable attached to configured media\n",
		       dev->name);
		goto release_dma;
	case DETECTED_RJ45H:
		pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_RJ45F:
		pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
		break;
	case DETECTED_AUI:
		pr_info("%s: using 10Base-5 (AUI)\n", dev->name);
		break;
	case DETECTED_BNC:
		pr_info("%s: using 10Base-2 (BNC)\n", dev->name);
		break;
	}

	writereg(dev, PP_LineCTL,
		 readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);

	lp->rx_mode = 0;
	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);

	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;

	if (lp->isa_config & STREAM_TRANSFER)
		lp->curr_rx_cfg |= RX_STREAM_ENBL;
#if ALLOW_DMA
	set_dma_cfg(dev);
#endif
	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);

	writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL |
				 TX_SQE_ERROR_ENBL |
				 TX_OK_ENBL |
				 TX_LATE_COL_ENBL |
				 TX_JBR_ENBL |
				 TX_ANY_COL_ENBL |
				 TX_16_COL_ENBL));

	writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL |
				  RX_MISS_COUNT_OVRFLOW_ENBL |
#if ALLOW_DMA
				  dma_bufcfg(dev) |
#endif
				  TX_COL_COUNT_OVRFLOW_ENBL |
				  TX_UNDERRUN_ENBL));

	writereg(dev, PP_BusCTL, (ENABLE_IRQ
				  | (dev->mem_start ? MEMORY_ON : 0)
#if ALLOW_DMA
				  | dma_busctl(dev)
#endif
				  ));
	netif_start_queue(dev);
	cs89_dbg(1, debug, "net_open() succeeded\n");
	return 0;
bad_out:
	return ret;
}

static int
net_close(struct net_device *dev)
{
#if ALLOW_DMA
	struct net_local *lp = netdev_priv(dev);
#endif

	netif_stop_queue(dev);

	writereg(dev, PP_RxCFG, 0);
	writereg(dev, PP_TxCFG, 0);
	writereg(dev, PP_BufCFG, 0);
	writereg(dev, PP_BusCTL, 0);

	free_irq(dev->irq, dev);

#if ALLOW_DMA
	if (lp->use_dma && lp->dma) {
		free_dma(dev->dma);
		release_dma_buff(lp);
	}
#endif

	return 0;
}

static struct net_device_stats *
net_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
	spin_unlock_irqrestore(&lp->lock, flags);

	return &dev->stats;
}

static void net_timeout(struct net_device *dev)
{
	cs89_dbg(0, err, "%s: transmit timed out, %s?\n",
		 dev->name,
		 tx_done(dev) ? "IRQ conflict" : "network cable problem");

	netif_wake_queue(dev);
}

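/* Transmit path: issue the transmit command and length, make sure the chip
 * has granted buffer space (READY_FOR_TX_NOW), then copy the frame into
 * TX_FRAME_PORT. The queue is stopped here and woken again from the
 * interrupt handler once the chip reports completion or free buffer space.
 */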
static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n",
		 dev->name, skb->len,
		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		  skb->data[ETH_ALEN + ETH_ALEN + 1]));

	spin_lock_irqsave(&lp->lock, flags);
	netif_stop_queue(dev);

	iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT);
	iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);

	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
		spin_unlock_irqrestore(&lp->lock, flags);
		cs89_dbg(0, err, "Tx buffer not free!\n");
		return NETDEV_TX_BUSY;
	}

	writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
	spin_unlock_irqrestore(&lp->lock, flags);
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void set_multicast_list(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;
	u16 cfg;

	spin_lock_irqsave(&lp->lock, flags);
	if (dev->flags & IFF_PROMISC)
		lp->rx_mode = RX_ALL_ACCEPT;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		lp->rx_mode = RX_MULTCAST_ACCEPT;
	else
		lp->rx_mode = 0;

	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);

	cfg = lp->curr_rx_cfg;
	if (lp->rx_mode == RX_ALL_ACCEPT)
		cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
	writereg(dev, PP_RxCFG, cfg);
	spin_unlock_irqrestore(&lp->lock, flags);
}

static int set_mac_address(struct net_device *dev, void *p)
{
	int i;
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
		 dev->name, dev->dev_addr);

	for (i = 0; i < ETH_ALEN / 2; i++)
		writereg(dev, PP_IA + i * 2,
			 (dev->dev_addr[i * 2] |
			  (dev->dev_addr[i * 2 + 1] << 8)));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops net_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_tx_timeout		= net_timeout,
	.ndo_start_xmit		= net_send_packet,
	.ndo_get_stats		= net_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= net_poll_controller,
#endif
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
	struct net_local *lp = netdev_priv(dev);
	int reset_start_time;

	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);

	msleep(30);

	if (lp->chip_type != CS8900) {
		iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
		iowrite8(dev->irq, lp->virt_addr + DATA_PORT);
		iowrite8(0, lp->virt_addr + DATA_PORT + 1);

		iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT);
		iowrite8((dev->mem_start >> 16) & 0xff,
			 lp->virt_addr + DATA_PORT);
		iowrite8((dev->mem_start >> 8) & 0xff,
			 lp->virt_addr + DATA_PORT + 1);
	}

	reset_start_time = jiffies;
	while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
	       jiffies - reset_start_time < 2)
		;
#endif
}

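/* Probe routine: verify the chip's EISA ID signature, identify the chip type
 * and revision, load the MAC address and adapter configuration from the
 * EEPROM (falling back to command-line overrides), work out the IRQ and
 * optional DMA settings, and finally register the net device.
 */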
static int __init
cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	int i;
	int tmp;
	unsigned rev_type = 0;
	int eeprom_buff[CHKSUM_LEN];
	int retval;

	if (!modular) {
		memset(lp, 0, sizeof(*lp));
		spin_lock_init(&lp->lock);
#ifndef MODULE
#if ALLOW_DMA
		if (g_cs89x0_dma) {
			lp->use_dma = 1;
			lp->dma = g_cs89x0_dma;
			lp->dmasize = 16;
		}
#endif
		lp->force = g_cs89x0_media__force;
#endif
	}

	pr_debug("PP_addr at %p[%x]: 0x%x\n",
		 ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT));
	iowrite16(PP_ChipID, ioaddr + ADD_PORT);

	tmp = ioread16(ioaddr + DATA_PORT);
	if (tmp != CHIP_EISA_ID_SIG) {
		pr_debug("%s: incorrect signature at %p[%x]: 0x%x!="
			 CHIP_EISA_ID_SIG_STR "\n",
			 dev->name, ioaddr, DATA_PORT, tmp);
		retval = -ENODEV;
		goto out1;
	}

	lp->virt_addr = ioaddr;

	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type & ~REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	pr_info_once("%s\n", version);

	pr_info("%s: cs89%c0%s rev %c found at %p ",
		dev->name,
		lp->chip_type == CS8900 ? '0' : '2',
		lp->chip_type == CS8920M ? "M" : "",
		lp->chip_revision,
		lp->virt_addr);

	reset_chip(dev);

	if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
	    (EEPROM_OK | EEPROM_PRESENT)) {
		for (i = 0; i < ETH_ALEN / 2; i++) {
			unsigned int Addr;

			Addr = readreg(dev, PP_IA + i * 2);
			dev->dev_addr[i * 2] = Addr & 0xFF;
			dev->dev_addr[i * 2 + 1] = Addr >> 8;
		}

		lp->adapter_cnf = 0;
		i = readreg(dev, PP_LineCTL);

		if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;

		if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;

		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
			lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;

		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;

		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
				A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;

		cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
			 dev->name, i, lp->adapter_cnf);

		if (lp->chip_type == CS8900)
			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;

		pr_cont("[Cirrus EEPROM] ");
	}

	pr_cont("\n");

	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
		pr_warn("No EEPROM, relying on command line....\n");
	else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
		pr_warn("EEPROM read failed, relying on command line\n");
	} else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
		    (EEPROM_OK | EEPROM_PRESENT))
			pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");

	} else {
		if (!lp->auto_neg_cnf)
			lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2];

		if (!lp->adapter_cnf)
			lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2];

		lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2];
		dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8;

		for (i = 0; i < ETH_ALEN / 2; i++) {
			dev->dev_addr[i * 2] = eeprom_buff[i];
			dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
		}
		cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
			 dev->name, lp->adapter_cnf);
	}

	{
		int count = 0;

		if (lp->force & FORCE_RJ45) {
			lp->adapter_cnf |= A_CNF_10B_T;
			count++;
		}
		if (lp->force & FORCE_AUI) {
			lp->adapter_cnf |= A_CNF_AUI;
			count++;
		}
		if (lp->force & FORCE_BNC) {
			lp->adapter_cnf |= A_CNF_10B_2;
			count++;
		}
		if (count > 1)
			lp->adapter_cnf |= A_CNF_MEDIA_AUTO;
		else if (lp->force & FORCE_RJ45)
			lp->adapter_cnf |= A_CNF_MEDIA_10B_T;
		else if (lp->force & FORCE_AUI)
			lp->adapter_cnf |= A_CNF_MEDIA_AUI;
		else if (lp->force & FORCE_BNC)
			lp->adapter_cnf |= A_CNF_MEDIA_10B_2;
	}

	cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n",
		 dev->name, lp->force, lp->adapter_cnf);

	pr_info("media %s%s%s",
		(lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "",
		(lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "",
		(lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : "");

	lp->irq_map = 0xffff;

	if (lp->chip_type != CS8900 &&
	    (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
	     (i != 0 && i < CS8920_NO_INTS))) {
		if (!dev->irq)
			dev->irq = i;
	} else {
		i = lp->isa_config & INT_NO_MASK;
#ifndef CONFIG_CS89x0_PLATFORM
		if (lp->chip_type == CS8900) {
			if (i >= ARRAY_SIZE(cs8900_irq_map))
				pr_err("invalid ISA interrupt number %d\n", i);
			else
				i = cs8900_irq_map[i];

			lp->irq_map = CS8900_IRQ_MAP;
		} else {
			int irq_map_buff[IRQ_MAP_LEN / 2];

			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
					    IRQ_MAP_LEN / 2,
					    irq_map_buff) >= 0) {
				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
					lp->irq_map = ((irq_map_buff[0] >> 8) |
						       (irq_map_buff[1] << 8));
			}
		}
#endif
		if (!dev->irq)
			dev->irq = i;
	}

	pr_cont(" IRQ %d", dev->irq);

#if ALLOW_DMA
	if (lp->use_dma) {
		get_dma_channel(dev);
		pr_cont(", DMA %d", dev->dma);
	} else
#endif
		pr_cont(", programmed I/O");

	pr_cont(", MAC %pM\n", dev->dev_addr);

	dev->netdev_ops	= &net_ops;
	dev->watchdog_timeo = HZ;

	cs89_dbg(0, info, "cs89x0_probe1() successful\n");

	retval = register_netdev(dev);
	if (retval)
		goto out2;
	return 0;
out2:
	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
out1:
	return retval;
}

#ifndef CONFIG_CS89x0_PLATFORM

static int __init
cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	int ret;
	void __iomem *io_mem;

	if (!lp)
		return -ENOMEM;

	dev->base_addr = ioport;

	if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) {
		ret = -EBUSY;
		goto out;
	}

	io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT);
	if (!io_mem) {
		ret = -ENOMEM;
		goto release;
	}

	if (ioport & 1) {
		cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport);
		if ((ioport & 2) != 2) {
			if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) !=
			    ADD_SIG) {
				pr_err("%s: bad signature 0x%x\n",
				       dev->name, ioread16(io_mem + ADD_PORT));
				ret = -ENODEV;
				goto unmap;
			}
		}
	}

	ret = cs89x0_probe1(dev, io_mem, modular);
	if (!ret)
		goto out;
unmap:
	ioport_unmap(io_mem);
release:
	release_region(ioport, NETCARD_IO_EXTENT);
out:
	return ret;
}

#ifndef MODULE

struct net_device * __init cs89x0_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	unsigned *port;
	int err = 0;
	int irq;
	int io;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);
	io = dev->base_addr;
	irq = dev->irq;

	cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io);

	if (io > 0x1ff) {
		err = cs89x0_ioport_probe(dev, io, 0);
	} else if (io != 0) {
		err = -ENXIO;
	} else {
		for (port = netcard_portlist; *port; port++) {
			if (cs89x0_ioport_probe(dev, *port, 0) == 0)
				break;
			dev->irq = irq;
		}
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	pr_warn("no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
	return ERR_PTR(err);
}
#endif
#endif

#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)

static struct net_device *dev_cs89x0;

static int io;
static int irq;
static int debug;
static char media[8];
static int duplex = -1;

static int use_dma;
static int dma;
static int dmasize = 16;

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(debug, int, 0);
module_param_string(media, media, sizeof(media), 0);
module_param(duplex, int, 0);
module_param(dma, int, 0);
module_param(dmasize, int, 0);
module_param(use_dma, int, 0);
MODULE_PARM_DESC(io, "cs89x0 I/O base address");
MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
#if DEBUGGING
MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
#else
MODULE_PARM_DESC(debug, "(ignored)");
#endif
MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");

MODULE_PARM_DESC(duplex, "(ignored)");
#if ALLOW_DMA
MODULE_PARM_DESC(dma, "cs89x0 ISA DMA channel; ignored if use_dma=0");
MODULE_PARM_DESC(dmasize, "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
MODULE_PARM_DESC(use_dma, "cs89x0 using DMA (0-1)");
#else
MODULE_PARM_DESC(dma, "(ignored)");
MODULE_PARM_DESC(dmasize, "(ignored)");
MODULE_PARM_DESC(use_dma, "(ignored)");
#endif

MODULE_AUTHOR("Mike Cruse, Russell Nelson <nelson@crynwr.com>, Andrew Morton");
MODULE_LICENSE("GPL");

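/*
 * Typical module usage (illustrative only; the I/O base, IRQ and media
 * values below are examples and must match how the card is configured):
 *
 *	modprobe cs89x0 io=0x300 irq=10 media=rj45
 *
 * io= is required for the modular driver (autoprobing is refused below);
 * use_dma=1 dma=5 dmasize=16 additionally enables ISA receive DMA when the
 * driver is built with ALLOW_DMA.
 */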
int __init init_module(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	struct net_local *lp;
	int ret = 0;

#if DEBUGGING
	net_debug = debug;
#else
	debug = 0;
#endif
	if (!dev)
		return -ENOMEM;

	dev->irq = irq;
	dev->base_addr = io;
	lp = netdev_priv(dev);

#if ALLOW_DMA
	if (use_dma) {
		lp->use_dma = use_dma;
		lp->dma = dma;
		lp->dmasize = dmasize;
	}
#endif

	spin_lock_init(&lp->lock);

	if (!strcmp(media, "rj45"))
		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
	else if (!strcmp(media, "aui"))
		lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
	else if (!strcmp(media, "bnc"))
		lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
	else
		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;

	if (duplex == -1)
		lp->auto_neg_cnf = AUTO_NEG_ENABLE;

	if (io == 0) {
		pr_err("Module autoprobing not allowed\n");
		pr_err("Append io=0xNNN\n");
		ret = -EPERM;
		goto out;
	} else if (io <= 0x1ff) {
		ret = -ENXIO;
		goto out;
	}

#if ALLOW_DMA
	if (use_dma && dmasize != 16 && dmasize != 64) {
		pr_err("dma size must be either 16K or 64K, not %dK\n",
		       dmasize);
		ret = -EPERM;
		goto out;
	}
#endif
	ret = cs89x0_ioport_probe(dev, io, 1);
	if (ret)
		goto out;

	dev_cs89x0 = dev;
	return 0;
out:
	free_netdev(dev);
	return ret;
}

void __exit
cleanup_module(void)
{
	struct net_local *lp = netdev_priv(dev_cs89x0);

	unregister_netdev(dev_cs89x0);
	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
	ioport_unmap(lp->virt_addr);
	release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
	free_netdev(dev_cs89x0);
}
#endif

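/* Memory-mapped (platform bus) attachment: the platform device supplies one
 * memory resource and one IRQ; the region is ioremap()ed and handed to the
 * common probe code.
 */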
#ifdef CONFIG_CS89x0_PLATFORM
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
	struct net_local *lp;
	struct resource *mem_res;
	void __iomem *virt_addr;
	int err;

	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->irq = platform_get_irq(pdev, 0);
	if (mem_res == NULL || dev->irq <= 0) {
		dev_warn(&dev->dev, "memory/interrupt resource missing\n");
		err = -ENXIO;
		goto free;
	}

	lp->size = resource_size(mem_res);
	if (!request_mem_region(mem_res->start, lp->size, DRV_NAME)) {
		dev_warn(&dev->dev, "request_mem_region() failed\n");
		err = -EBUSY;
		goto free;
	}

	virt_addr = ioremap(mem_res->start, lp->size);
	if (!virt_addr) {
		dev_warn(&dev->dev, "ioremap() failed\n");
		err = -ENOMEM;
		goto release;
	}

	err = cs89x0_probe1(dev, virt_addr, 0);
	if (err) {
		dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
		goto unmap;
	}

	platform_set_drvdata(pdev, dev);
	return 0;

unmap:
	iounmap(virt_addr);
release:
	release_mem_region(mem_res->start, lp->size);
free:
	free_netdev(dev);
	return err;
}

static int cs89x0_platform_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct net_local *lp = netdev_priv(dev);
	struct resource *mem_res;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	unregister_netdev(dev);
	iounmap(lp->virt_addr);
	release_mem_region(mem_res->start, lp->size);
	free_netdev(dev);
	return 0;
}

static struct platform_driver cs89x0_driver = {
	.driver	= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
	.remove	= cs89x0_platform_remove,
};

module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);

#endif