1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446#include <linux/module.h>
447#include <linux/kernel.h>
448#include <linux/string.h>
449#include <linux/interrupt.h>
450#include <linux/ptrace.h>
451#include <linux/errno.h>
452#include <linux/ioport.h>
453#include <linux/pci.h>
454#include <linux/eisa.h>
455#include <linux/delay.h>
456#include <linux/init.h>
457#include <linux/spinlock.h>
458#include <linux/crc32.h>
459#include <linux/netdevice.h>
460#include <linux/etherdevice.h>
461#include <linux/skbuff.h>
462#include <linux/time.h>
463#include <linux/types.h>
464#include <linux/unistd.h>
465#include <linux/ctype.h>
466#include <linux/dma-mapping.h>
467#include <linux/moduleparam.h>
468#include <linux/bitops.h>
469#include <linux/gfp.h>
470
471#include <asm/io.h>
472#include <asm/dma.h>
473#include <asm/byteorder.h>
474#include <asm/unaligned.h>
475#include <asm/uaccess.h>
476#ifdef CONFIG_PPC_PMAC
477#include <asm/machdep.h>
478#endif
479
480#include "de4x5.h"
481
482static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485#define c_char const char
486
487
488
489
/*
** Static MII PHY identification record: how to reset a known PHY and which
** register/mask/value triple reveals whether it is running at 100Mb/s.
*/
struct phy_table {
    int reset;          /* non-zero if this PHY needs an explicit reset */
    int id;             /* PHY OUI/model identifier */
    int ta;             /* turnaround cycles for MII management reads */
    struct {            /* speed-detection probe */
        int reg;        /* MII register to read */
        int mask;       /* bits to test */
        int value;      /* expected value when running at 100Mb/s */
    } spd;
};
500
/*
** Per-device MII PHY state. The first four members mirror struct phy_table
** (filled from phy_info[] or the SROM) and are followed by runtime state
** discovered during media probing.
*/
struct mii_phy {
    int reset;          /* non-zero if this PHY needs an explicit reset */
    int id;             /* PHY OUI/model identifier */
    int ta;             /* turnaround cycles for MII management reads */
    struct {            /* speed-detection probe (see struct phy_table) */
        int reg;
        int mask;
        int value;
    } spd;
    int addr;           /* MII bus address of this PHY */
    u_char *gep;        /* SROM GEP sequence pointer, if any */
    u_char *rst;        /* SROM reset sequence pointer, if any */
    u_int mc;           /* media capabilities word */
    u_int ana;          /* auto-negotiation advertisement */
    u_int fdx;          /* full duplex capability bits */
    u_int ttm;          /* transmit threshold mode bits */
    u_int mci;          /* media-code / interrupt info from SROM */
};
519
520#define DE4X5_MAX_PHY 8
521
/*
** SIA (Serial Interface Adapter) media description for the 10Mb/s ports:
** the three SIA CSR values plus general-purpose port programming.
*/
struct sia_phy {
    u_char mc;          /* media code */
    u_char ext;         /* non-zero when an extended SROM block was used */
    int csr13;          /* SIA connectivity register value */
    int csr14;          /* SIA TX/RX register value */
    int csr15;          /* SIA general register value */
    int gepc;           /* GEP control word */
    int gep;            /* GEP data word */
};
531
532
533
534
535
/*
** Known MII PHYs and their 100Mb/s detection recipes (reset flag, id,
** turnaround, then {reg, mask, value} for the speed probe). Unknown PHYs
** fall back to the GENERIC_* defaults below.
*/
static struct phy_table phy_info[] = {
    {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
    {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
    {0, SEEQ_T4    , 1, {0x12, 0x10, 0x10}},
    {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}},
    {0, 0x7810     , 1, {0x14, 0x0800, 0x0800}}
};
543
544
545
546
547
548
549#define GENERIC_REG 0x05
550#define GENERIC_MASK MII_ANLPA_100M
551#define GENERIC_VALUE MII_ANLPA_100M
552
553
554
555
/*
** OUI prefixes of Ethernet addresses known to ship with broken SROMs
** (indexed via the SMC/ACCTON constants below); only the first three
** bytes are significant.
*/
static c_char enet_det[][ETH_ALEN] = {
    {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
    {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
};
560
561#define SMC 1
562#define ACCTON 2
563
564
565
566
567
568
/*
** Replacement SROM image fragment used by srom_repair() to patch cards
** whose on-board SROM contents are known to be bad.
*/
static c_char srom_repair_info[][100] = {
    {0x00,0x1e,0x00,0x00,0x00,0x08,
     0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
     0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
     0x00,0x18,}
};
575
576
577#ifdef DE4X5_DEBUG
578static int de4x5_debug = DE4X5_DEBUG;
579#else
580
581static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582#endif
583
584
585
586
587
588
589
590
591
592
/* Module argument string; may be baked in at compile time via DE4X5_PARM. */
#ifdef DE4X5_PARM
static char *args = DE4X5_PARM;
#else
static char *args;
#endif

/* Parsed form of the "args" module parameter (see de4x5_parse_params()). */
struct parameters {
    bool fdx;           /* force full duplex */
    int autosense;      /* requested media autosense mode */
};
603
604#define DE4X5_AUTOSENSE_MS 250
605
606#define DE4X5_NDA 0xffe0
607
608
609
610
611#define PROBE_LENGTH 32
612#define ETH_PROM_SIG 0xAA5500FFUL
613
614
615
616
617#define PKT_BUF_SZ 1536
618#define IEEE802_3_SZ 1518
619#define MAX_PKT_SZ 1514
620#define MAX_DAT_SZ 1500
621#define MIN_DAT_SZ 1
622#define PKT_HDR_LEN 14
623#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624#define QUEUE_PKT_TIMEOUT (3*HZ)
625
626
627
628
629
630#define DE4X5_EISA_IO_PORTS 0x0c00
631#define DE4X5_EISA_TOTAL_SIZE 0x100
632
633#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636#define DE4X5_NAME_LENGTH 8
637
638static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
639
640
641
642
/* PROBE_LENGTH and ETH_PROM_SIG are already defined (identically) above;
   the duplicate definitions that used to sit here have been removed. */
645
646
647
648
649#define PCI_MAX_BUS_NUM 8
650#define DE4X5_PCI_TOTAL_SIZE 0x80
651#define DE4X5_CLASS_CODE 0x00020000
652
653
654
655
656
657
658
659#define DE4X5_ALIGN4 ((u_long)4 - 1)
660#define DE4X5_ALIGN8 ((u_long)8 - 1)
661#define DE4X5_ALIGN16 ((u_long)16 - 1)
662#define DE4X5_ALIGN32 ((u_long)32 - 1)
663#define DE4X5_ALIGN64 ((u_long)64 - 1)
664#define DE4X5_ALIGN128 ((u_long)128 - 1)
665
666#define DE4X5_ALIGN DE4X5_ALIGN32
667#define DE4X5_CACHE_ALIGN CAL_16LONG
668#define DESC_SKIP_LEN DSL_0
669
670#define DESC_ALIGN
671
672#ifndef DEC_ONLY
673static int dec_only;
674#else
675static int dec_only = 1;
676#endif
677
678
679
680
/*
** Interrupt mask helpers. All of these expect local variables `imr` (s32)
** and `iobase` (u_long) in the caller's scope; they read-modify-write the
** interrupt mask register (CSR7).
*/
#define ENABLE_IRQs { \
    imr |= lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define DISABLE_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define UNMASK_IRQs {\
    imr |= lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define MASK_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}
702
703
704
705
/*
** Start/stop the chip's TX and RX engines via the operating mode register
** (CSR6). Both macros require `omr` (s32) and `iobase` in caller scope.
*/
#define START_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr |= OMR_ST | OMR_SR;\
    outl(omr, DE4X5_OMR); \
}

#define STOP_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr &= ~(OMR_ST|OMR_SR);\
    outl(omr, DE4X5_OMR); \
}

/*
** Reset the SIA by zeroing its connectivity register. NOTE: the expansion
** already ends in a semicolon, so uses should not add another.
*/
#define RESET_SIA outl(0, DE4X5_SICR);
722
723
724
725
/* DE4X5_AUTOSENSE_MS is already defined (identically) above; the duplicate
   definition that used to sit here has been removed. */
727
728
729
730
/*
** In-memory image of the card's serial ROM (SROM). The layout mirrors the
** DEC SROM specification: IDs, the IEEE station address and the media
** info leaves, ending in a checksum.
*/
struct de4x5_srom {
    char sub_vendor_id[2];
    char sub_system_id[2];
    char reserved[12];
    char id_block_crc;
    char reserved2;
    char version;
    char num_controllers;
    char ieee_addr[6];      /* station MAC address */
    char info[100];         /* media info leaf data */
    short chksum;
};
#define SUB_VENDOR_ID 0x500a
744
745
746
747
748
749
750
751
752
753#define NUM_RX_DESC 8
754#define NUM_TX_DESC 32
755#define RX_BUFF_SZ 1536
756
757
/*
** Hardware DMA descriptor (shared with the chip, hence little-endian
** fields). `status` is volatile because the chip updates it to hand
** ownership back to the driver.
*/
struct de4x5_desc {
    volatile __le32 status;
    __le32 des1;            /* control bits + buffer sizes */
    __le32 buf;             /* DMA address of the data buffer */
    __le32 next;            /* DMA address of the next descriptor (chained mode) */
    DESC_ALIGN
};
765
766
767
768
769#define DE4X5_PKT_STAT_SZ 16
770#define DE4X5_PKT_BIN_SZ 128
771
772
/* Driver-private packet statistics kept alongside net_device_stats. */
struct pkt_stats {
    u_int bins[DE4X5_PKT_STAT_SZ];  /* packet-size histogram */
    u_int unicast;
    u_int multicast;
    u_int broadcast;
    u_int excessive_collisions;
    u_int tx_underruns;
    u_int excessive_underruns;
    u_int rx_runt_frames;
    u_int rx_collision;
    u_int rx_dribble;
    u_int rx_overflow;
};
786
/*
** Per-device private state, reached via netdev_priv(dev).
*/
struct de4x5_private {
    char adapter_name[80];              /* e.g. "DE500 (eth0)" */
    u_long interrupt;                   /* interrupt-in-progress flag (bit ops) */
    struct de4x5_desc *rx_ring;         /* RX descriptor ring (coherent DMA) */
    struct de4x5_desc *tx_ring;         /* TX descriptor ring (follows rx_ring) */
    struct sk_buff *tx_skb[NUM_TX_DESC];/* skb per TX slot; (void*)1 = setup frame */
    struct sk_buff *rx_skb[NUM_RX_DESC];/* skb per RX slot; (void*)1 = no skb */
    int rx_new, rx_old;                 /* RX ring head/tail indices */
    int tx_new, tx_old;                 /* TX ring head/tail indices */
    char setup_frame[SETUP_FRAME_LEN];  /* address filter setup frame image */
    char frame[64];                     /* loopback test frame */
    spinlock_t lock;                    /* protects ring state */
    struct net_device_stats stats;
    struct pkt_stats pktStats;          /* driver-private statistics */
    char rxRingSize;
    char txRingSize;
    int bus;                            /* EISA or PCI */
    int bus_num;                        /* PCI bus number */
    int device;                         /* PCI device number */
    int state;                          /* chip state: INITIALISED/CLOSED/OPEN */
    int chipset;                        /* DC21040/21041/21140/2114x */
    s32 irq_mask;                       /* normal event interrupt mask */
    s32 irq_en;                         /* summary (normal/abnormal) enables */
    int media;                          /* media type being probed/used */
    int c_media;                        /* remember the last media connection */
    bool fdx;                           /* full duplex in use */
    int linkOK;                         /* incremented while the link is good */
    int autosense;                      /* media autosense mode in force */
    bool tx_enable;                     /* transmitter enabled */
    int setup_f;                        /* HASH or PERFECT filtering */
    int local_state;                    /* media FSM sub-state */
    struct mii_phy phy[DE4X5_MAX_PHY];  /* discovered MII PHYs */
    struct sia_phy sia;                 /* SIA media programming */
    int active;                         /* index of the active PHY */
    int mii_cnt;                        /* number of PHYs found */
    int timeout;                        /* media probe timeout (jiffies), -1 = off */
    struct timer_list timer;            /* autosense timer (de4x5_ast) */
    int tmp;                            /* scratch for the media FSM */
    struct {                            /* state cached across media switches */
        u_long lock;                    /* bit 0 guards the skb cache queue */
        s32 csr0;
        s32 csr6;
        s32 csr7;
        s32 gep;
        s32 gepc;
        s32 csr13;
        s32 csr14;
        s32 csr15;
        int save_cnt;                   /* nesting count for cache_state() */
        struct sk_buff_head queue;      /* TX skbs held during media changes */
    } cache;
    struct de4x5_srom srom;             /* copy of the board SROM */
    int cfrv;                           /* PCI chip revision */
    int rx_ovf;                         /* use the RX overflow counter workaround */
    bool useSROM;                       /* media info comes from the SROM */
    bool useMII;                        /* media is driven via the MII */
    int asBitValid;                     /* autosense bit validity mask */
    int asPolarity;                     /* autosense bit polarity */
    int asBit;                          /* autosense bit in the GEP/SIGR */
    int defMedium;                      /* default medium from the SROM */
    int tcount;                         /* media FSM retry counter */
    int infoblock_init;                 /* infoblock already initialised */
    int infoleaf_offset;                /* offset of the info leaf in the SROM */
    s32 infoblock_csr6;                 /* OMR value prescribed by the infoblock */
    int infoblock_media;                /* media prescribed by the infoblock */
    int (*infoleaf_fn)(struct net_device *);  /* chipset infoleaf decoder */
    u_char *rst;                        /* SROM reset sequence pointer */
    u_char ibn;                         /* infoblock number */
    struct parameters params;           /* parsed module parameters */
    struct device *gendev;              /* generic device (for the DMA API) */
    dma_addr_t dma_rings;               /* bus address of the descriptor rings */
    int dma_size;                       /* size of the coherent allocation */
    char *rx_bufs;                      /* rx bufs on alpha, sparc, ... (memcpy path) */
};
861
862
863
864
865
866
867
868
869
870
871
872
873
/*
** Records the identity of the most recently probed adapter so that
** multi-port boards can be recognised across successive probes.
*/
static struct {
    int chipset;
    int bus;
    int irq;
    u_char addr[ETH_ALEN];
} last = {0,};
880
881
882
883
884
885
886
887
/* Number of free TX descriptors, accounting for ring wrap-around (one
   slot is always kept unused so full != empty). */
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+lp->txRingSize-lp->tx_new-1:\
			lp->tx_old               -lp->tx_new-1)

/* True while the chip still owns descriptors queued for transmit. */
#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
893
894
895
896
897static int de4x5_open(struct net_device *dev);
898static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901static int de4x5_close(struct net_device *dev);
902static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904static void set_multicast_list(struct net_device *dev);
905static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907
908
909
910static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911static int de4x5_init(struct net_device *dev);
912static int de4x5_sw_reset(struct net_device *dev);
913static int de4x5_rx(struct net_device *dev);
914static int de4x5_tx(struct net_device *dev);
915static void de4x5_ast(struct net_device *dev);
916static int de4x5_txur(struct net_device *dev);
917static int de4x5_rx_ovfc(struct net_device *dev);
918
919static int autoconf_media(struct net_device *dev);
920static void create_packet(struct net_device *dev, char *frame, int len);
921static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922static int dc21040_autoconf(struct net_device *dev);
923static int dc21041_autoconf(struct net_device *dev);
924static int dc21140m_autoconf(struct net_device *dev);
925static int dc2114x_autoconf(struct net_device *dev);
926static int srom_autoconf(struct net_device *dev);
927static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930static int test_for_100Mb(struct net_device *dev, int msec);
931static int wait_for_link(struct net_device *dev);
932static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933static int is_spd_100(struct net_device *dev);
934static int is_100_up(struct net_device *dev);
935static int is_10_up(struct net_device *dev);
936static int is_anc_capable(struct net_device *dev);
937static int ping_media(struct net_device *dev, int msec);
938static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939static void de4x5_free_rx_buffs(struct net_device *dev);
940static void de4x5_free_tx_buffs(struct net_device *dev);
941static void de4x5_save_skbs(struct net_device *dev);
942static void de4x5_rst_desc_ring(struct net_device *dev);
943static void de4x5_cache_state(struct net_device *dev, int flag);
944static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947static void de4x5_setup_intr(struct net_device *dev);
948static void de4x5_init_connection(struct net_device *dev);
949static int de4x5_reset_phy(struct net_device *dev);
950static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952static int test_tp(struct net_device *dev, s32 msec);
953static int EISA_signature(char *name, struct device *device);
954static int PCI_signature(char *name, struct de4x5_private *lp);
955static void DevicePresent(struct net_device *dev, u_long iobase);
956static void enet_addr_rst(u_long aprom_addr);
957static int de4x5_bad_srom(struct de4x5_private *lp);
958static short srom_rd(u_long address, u_char offset);
959static void srom_latch(u_int command, u_long address);
960static void srom_command(u_int command, u_long address);
961static void srom_address(u_int command, u_long address, u_char offset);
962static short srom_data(u_int command, u_long address);
963
964static void sendto_srom(u_int command, u_long addr);
965static int getfrom_srom(u_long addr);
966static int srom_map_media(struct net_device *dev);
967static int srom_infoleaf_info(struct net_device *dev);
968static void srom_init(struct net_device *dev);
969static void srom_exec(struct net_device *dev, u_char *p);
970static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972static int mii_rdata(u_long ioaddr);
973static void mii_wdata(int data, int len, u_long ioaddr);
974static void mii_ta(u_long rw, u_long ioaddr);
975static int mii_swap(int data, int len);
976static void mii_address(u_char addr, u_long ioaddr);
977static void sendto_mii(u32 command, int data, u_long ioaddr);
978static int getfrom_mii(u32 command, u_long ioaddr);
979static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980static int mii_get_phy(struct net_device *dev);
981static void SetMulticastFilter(struct net_device *dev);
982static int get_hw_addr(struct net_device *dev);
983static void srom_repair(struct net_device *dev, int card);
984static int test_bad_enet(struct net_device *dev, int status);
985static int an_exception(struct de4x5_private *lp);
986static char *build_setup_frame(struct net_device *dev, int mode);
987static void disable_ast(struct net_device *dev);
988static long de4x5_switch_mac_port(struct net_device *dev);
989static int gep_rd(struct net_device *dev);
990static void gep_wr(s32 data, struct net_device *dev);
991static void yawn(struct net_device *dev, int state);
992static void de4x5_parse_params(struct net_device *dev);
993static void de4x5_dbg_open(struct net_device *dev);
994static void de4x5_dbg_mii(struct net_device *dev, int k);
995static void de4x5_dbg_media(struct net_device *dev);
996static void de4x5_dbg_srom(struct de4x5_srom *p);
997static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998static int de4x5_strncmp(char *a, char *b, int n);
999static int dc21041_infoleaf(struct net_device *dev);
1000static int dc21140_infoleaf(struct net_device *dev);
1001static int dc21142_infoleaf(struct net_device *dev);
1002static int dc21143_infoleaf(struct net_device *dev);
1003static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1004static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1005static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1006static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1007static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1008static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1009static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1010
1011
1012
1013
1014
1015
1016
1017static int io=0x0;
1018
1019module_param(io, int, 0);
1020module_param(de4x5_debug, int, 0);
1021module_param(dec_only, int, 0);
1022module_param(args, charp, 0);
1023
1024MODULE_PARM_DESC(io, "de4x5 I/O base address");
1025MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1026MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1027MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1028MODULE_LICENSE("GPL");
1029
1030
1031
1032
/*
** Maps each chipset that supports SROM info leaves to its decoder
** function (consulted by srom_infoleaf_info()).
*/
struct InfoLeaf {
    int chipset;
    int (*fn)(struct net_device *);
};
static struct InfoLeaf infoleaf_array[] = {
    {DC21041, dc21041_infoleaf},
    {DC21140, dc21140_infoleaf},
    {DC21142, dc21142_infoleaf},
    {DC21143, dc21143_infoleaf}
};
#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1044
1045
1046
1047
/*
** SROM info block dispatch table, indexed by block type 0-5; the final
** entry (index COMPACT) handles the compact format.
*/
static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
    type0_infoblock,
    type1_infoblock,
    type2_infoblock,
    type3_infoblock,
    type4_infoblock,
    type5_infoblock,
    compact_infoblock
};

#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1059
1060
1061
1062
/*
** Software-reset the chip via the bus mode register (CSR0): pulse the
** SWR bit, restore the previous BMR value, then let the chip settle.
** Requires `iobase` in caller scope.
*/
#define RESET_DE4X5 {\
    int i;\
    i=inl(DE4X5_BMR);\
    mdelay(1);\
    outl(i | BMR_SWR, DE4X5_BMR);\
    mdelay(1);\
    outl(i, DE4X5_BMR);\
    mdelay(1);\
    for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
    mdelay(1);\
}

/* Hard-reset the external PHY by toggling the GEP reset line. */
#define PHY_HARD_RESET {\
    outl(GEP_HRST, DE4X5_GEP); \
    mdelay(1); \
    outl(0x00, DE4X5_GEP);\
    mdelay(2); \
}
1081
/* net_device callbacks for this driver. */
static const struct net_device_ops de4x5_netdev_ops = {
    .ndo_open		= de4x5_open,
    .ndo_stop		= de4x5_close,
    .ndo_start_xmit	= de4x5_queue_pkt,
    .ndo_get_stats	= de4x5_get_stats,
    .ndo_set_rx_mode	= set_multicast_list,
    .ndo_do_ioctl	= de4x5_ioctl,
    .ndo_change_mtu	= eth_change_mtu,
    .ndo_set_mac_address= eth_mac_addr,
    .ndo_validate_addr	= eth_validate_addr,
};
1093
1094
/*
** One-time hardware initialisation for a probed adapter: wake the chip,
** reset it, identify the board, read the station address, allocate the
** coherent descriptor rings and register the net device. Returns 0 on
** success or a negative errno; on failure after the DMA allocation the
** rings are freed again.
*/
static int
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
    char name[DE4X5_NAME_LENGTH + 1];
    struct de4x5_private *lp = netdev_priv(dev);
    struct pci_dev *pdev = NULL;
    int i, status=0;

    dev_set_drvdata(gendev, dev);

    /* Wake the chip out of any power-saving state (bus specific). */
    if (lp->bus == EISA) {
	outb(WAKEUP, PCI_CFPM);
    } else {
	pdev = to_pci_dev (gendev);
	pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
    }
    mdelay(10);

    RESET_DE4X5;

    /* TX/RX process state must be stopped after a reset. */
    if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
	return -ENXIO;
    }

    /* Identify the board; an empty name means no supported board found. */
    lp->useSROM = false;
    if (lp->bus == PCI) {
	PCI_signature(name, lp);
    } else {
	EISA_signature(name, gendev);
    }

    if (*name == '\0') {
	return -ENXIO;
    }

    dev->base_addr = iobase;
    printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);

    status = get_hw_addr(dev);
    printk(", h/w address %pM\n", dev->dev_addr);

    if (status != 0) {
	printk(" which has an Ethernet PROM CRC error.\n");
	return -ENXIO;
    } else {
	skb_queue_head_init(&lp->cache.queue);
	lp->cache.gepc = GEP_INIT;
	lp->asBit = GEP_SLNK;
	lp->asPolarity = GEP_SLNK;
	lp->asBitValid = ~0;
	lp->timeout = -1;
	lp->gendev = gendev;
	spin_lock_init(&lp->lock);
	init_timer(&lp->timer);
	/* NOTE(review): de4x5_ast takes struct net_device *, cast to the
	   timer's (unsigned long) signature — pre-timer_setup() idiom. */
	lp->timer.function = (void (*)(unsigned long))de4x5_ast;
	lp->timer.data = (unsigned long)dev;
	de4x5_parse_params(dev);

	/* Chips without the requested media capability fall back to a
	   sensible autosense default. */
	lp->autosense = lp->params.autosense;
	if (lp->chipset != DC21140) {
	    if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
		lp->params.autosense = TP;
	    }
	    if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
		lp->params.autosense = BNC;
	    }
	}
	lp->fdx = lp->params.fdx;
	sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));

	/* One coherent allocation holds both descriptor rings; platforms
	   that must memcpy RX data also carve buffers out of it. */
	lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
	lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
#endif
	lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
					 &lp->dma_rings, GFP_ATOMIC);
	if (lp->rx_ring == NULL) {
	    return -ENOMEM;
	}

	lp->tx_ring = lp->rx_ring + NUM_RX_DESC;

#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
	/* Buffers are attached later (de4x5_alloc_rx_buff); the (void*)1
	   sentinel marks "no skb yet". */
	for (i=0; i<NUM_RX_DESC; i++) {
	    lp->rx_ring[i].status = 0;
	    lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
	    lp->rx_ring[i].buf = 0;
	    lp->rx_ring[i].next = 0;
	    lp->rx_skb[i] = (struct sk_buff *) 1;
	}

#else
	{
	    dma_addr_t dma_rx_bufs;

	    /* Align both the bus and the virtual buffer addresses. */
	    dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
		* sizeof(struct de4x5_desc);
	    dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
	    lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
					   + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
	    for (i=0; i<NUM_RX_DESC; i++) {
		lp->rx_ring[i].status = 0;
		lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
		lp->rx_ring[i].buf =
		    cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
		lp->rx_ring[i].next = 0;
		lp->rx_skb[i] = (struct sk_buff *) 1;
	    }

	}
#endif

	barrier();

	lp->rxRingSize = NUM_RX_DESC;
	lp->txRingSize = NUM_TX_DESC;

	/* Mark the last descriptor of each ring so the chip wraps. */
	lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
	lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);

	/* Tell the chip where the rings live. */
	outl(lp->dma_rings, DE4X5_RRBA);
	outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
	     DE4X5_TRBA);

	lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
	lp->irq_en = IMR_NIM | IMR_AIM;

	/* Build the media-probe loopback frame. */
	create_packet(dev, lp->frame, sizeof(lp->frame));

	/* DC21140 rev 2.0 needs the RX overflow workaround. */
	i = lp->cfrv & 0x000000fe;
	if ((lp->chipset == DC21140) && (i == 0x20)) {
	    lp->rx_ovf = 1;
	}

	/* Decode the SROM media information if this board provides it. */
	if (lp->useSROM) {
	    lp->state = INITIALISED;
	    if (srom_infoleaf_info(dev)) {
		dma_free_coherent (gendev, lp->dma_size,
				   lp->rx_ring, lp->dma_rings);
		return -ENXIO;
	    }
	    srom_init(dev);
	}

	lp->state = CLOSED;

	/* Later chips may have MII PHYs; go find them. */
	if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
	    mii_get_phy(dev);
	}

	printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
	       ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
    }

    if (de4x5_debug & DEBUG_VERSION) {
	printk(version);
    }

    /* Hook up the device methods and register with the network stack. */
    SET_NETDEV_DEV(dev, gendev);
    dev->netdev_ops = &de4x5_netdev_ops;
    dev->mem_start = 0;

    if ((status = register_netdev (dev))) {
	dma_free_coherent (gendev, lp->dma_size,
			   lp->rx_ring, lp->dma_rings);
	return status;
    }

    /* Let the adapter sleep until it is opened. */
    yawn(dev, SLEEP);

    return status;
}
1290
1291
1292static int
1293de4x5_open(struct net_device *dev)
1294{
1295 struct de4x5_private *lp = netdev_priv(dev);
1296 u_long iobase = dev->base_addr;
1297 int i, status = 0;
1298 s32 omr;
1299
1300
1301 for (i=0; i<lp->rxRingSize; i++) {
1302 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1303 de4x5_free_rx_buffs(dev);
1304 return -EAGAIN;
1305 }
1306 }
1307
1308
1309
1310
1311 yawn(dev, WAKEUP);
1312
1313
1314
1315
1316 status = de4x5_init(dev);
1317 spin_lock_init(&lp->lock);
1318 lp->state = OPEN;
1319 de4x5_dbg_open(dev);
1320
1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1322 lp->adapter_name, dev)) {
1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1325 lp->adapter_name, dev)) {
1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1327 disable_ast(dev);
1328 de4x5_free_rx_buffs(dev);
1329 de4x5_free_tx_buffs(dev);
1330 yawn(dev, SLEEP);
1331 lp->state = CLOSED;
1332 return -EAGAIN;
1333 } else {
1334 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1335 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1336 }
1337 }
1338
1339 lp->interrupt = UNMASK_INTERRUPTS;
1340 dev->trans_start = jiffies;
1341
1342 START_DE4X5;
1343
1344 de4x5_setup_intr(dev);
1345
1346 if (de4x5_debug & DEBUG_OPEN) {
1347 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1348 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1349 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1350 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1351 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1352 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1353 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1354 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1355 }
1356
1357 return status;
1358}
1359
1360
1361
1362
1363
1364
1365
1366
1367
/*
** Initialise the adapter for use: quiesce the queue, perform a software
** reset (which also loads the setup frame) and kick off media autosense.
** Must be called with the chip awake. Always returns 0; any setup-frame
** failure is reported by de4x5_sw_reset() via printk.
*/
static int
de4x5_init(struct net_device *dev)
{
    /* Lock out the transmitter while we rebuild the rings. */
    netif_stop_queue(dev);

    de4x5_sw_reset(dev);

    /* Autoconfigure the connected port. */
    autoconf_media(dev);

    return 0;
}
1381
/*
** Software-level reset: reprogram bus mode and operating mode, rebuild
** both descriptor rings, then transmit the perfect-filter setup frame
** and wait (up to 500ms, polled) for the chip to complete it. Returns 0
** on success or -EIO if the setup frame times out.
*/
static int
de4x5_sw_reset(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 bmr, omr;

    /* Select the MII or SRL port, using defaults when no SROM info. */
    if (!lp->useSROM) {
	if (lp->phy[lp->active].id != 0) {
	    lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
	} else {
	    lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
	}
	de4x5_switch_mac_port(dev);
    }

    /* Set the programmable burst length (8 longwords for the DC21140,
       4 otherwise) plus cache alignment and descriptor skip length. */
    bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
    bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
    outl(bmr, DE4X5_BMR);

    omr = inl(DE4X5_OMR) & ~OMR_PR;     /* promiscuous mode off */
    if (lp->chipset == DC21140) {
	omr |= (OMR_SDP | OMR_SB);
    }
    lp->setup_f = PERFECT;              /* perfect address filtering */
    outl(lp->dma_rings, DE4X5_RRBA);
    outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
	 DE4X5_TRBA);

    lp->rx_new = lp->rx_old = 0;
    lp->tx_new = lp->tx_old = 0;

    /* Hand every RX descriptor to the chip; reclaim every TX slot. */
    for (i = 0; i < lp->rxRingSize; i++) {
	lp->rx_ring[i].status = cpu_to_le32(R_OWN);
    }

    for (i = 0; i < lp->txRingSize; i++) {
	lp->tx_ring[i].status = cpu_to_le32(0);
    }

    barrier();

    /* Build the setup frame from the current multicast list. */
    SetMulticastFilter(dev);

    load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
    outl(omr|OMR_ST, DE4X5_OMR);

    /* Poll for setup frame completion (chip clears the OWN bit). */
    for (j=0, i=0;(i<500) && (j==0);i++) {
	mdelay(1);
	if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
    }
    outl(omr, DE4X5_OMR);               /* stop everything again */

    if (j == 0) {
	printk("%s: Setup frame timed out, status %08x\n", dev->name,
	       inl(DE4X5_STS));
	status = -EIO;
    }

    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
    lp->tx_old = lp->tx_new;

    return status;
}
1456
1457
1458
1459
/*
** ndo_start_xmit: queue an skb for transmission. Packets may be parked
** in lp->cache.queue when the driver is busy (media change or interrupt
** context) and drained later; bit 0 of lp->cache.lock serialises access
** to that queue against the interrupt handler.
*/
static netdev_tx_t
de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_long flags = 0;

    netif_stop_queue(dev);
    if (!lp->tx_enable)                 /* transmitter disabled (no media) */
	return NETDEV_TX_LOCKED;

    /* Clean out any already-completed TX slots before queueing more. */
    spin_lock_irqsave(&lp->lock, flags);
    de4x5_tx(dev);
    spin_unlock_irqrestore(&lp->lock, flags);

    /* Somebody else holds the cache queue; retry later unless we are the
       interrupt path itself. */
    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
	return NETDEV_TX_LOCKED;

    /* Device busy or a stale skb in the next slot: park the packet. */
    if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
	if (lp->interrupt) {
	    de4x5_putb_cache(dev, skb);    /* Behind the other pkts */
	} else {
	    de4x5_put_cache(dev, skb);     /* Preserve the order */
	}
	if (de4x5_debug & DEBUG_TX) {
	    printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
	}
    } else if (skb->len > 0) {
	/* Preserve ordering: anything already cached goes out first. */
	if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
	    de4x5_put_cache(dev, skb);
	    skb = de4x5_get_cache(dev);
	}

	while (skb && !netif_queue_stopped(dev) &&
	       (u_long) lp->tx_skb[lp->tx_new] <= 1) {
	    spin_lock_irqsave(&lp->lock, flags);
	    netif_stop_queue(dev);
	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
	    lp->stats.tx_bytes += skb->len;
	    outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */

	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;

	    if (TX_BUFFS_AVAIL) {
		netif_start_queue(dev);   /* Another pkt may be queued */
	    }
	    skb = de4x5_get_cache(dev);
	    spin_unlock_irqrestore(&lp->lock, flags);
	}
	if (skb) de4x5_putb_cache(dev, skb);  /* couldn't send: re-park */
    }

    lp->cache.lock = 0;                 /* release the cache queue */

    return NETDEV_TX_OK;
}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
/*
** Interrupt handler: service up to 8 batches of RX/TX/error events,
** acknowledging each status snapshot by writing it back to CSR5. A fatal
** system (bus) error stops the chip and returns immediately. Before
** returning, any packets cached during the interrupt are requeued.
*/
static irqreturn_t
de4x5_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct de4x5_private *lp;
    s32 imr, omr, sts, limit;
    u_long iobase;
    unsigned int handled = 0;

    lp = netdev_priv(dev);
    spin_lock(&lp->lock);
    iobase = dev->base_addr;

    DISABLE_IRQs;                       /* mask our interrupts */

    if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
	printk("%s: Re-entering the interrupt handler.\n", dev->name);

    synchronize_irq(dev->irq);

    for (limit=0; limit<8; limit++) {
	sts = inl(DE4X5_STS);           /* read then acknowledge */
	outl(sts, DE4X5_STS);

	if (!(sts & lp->irq_mask)) break; /* nothing left to service */
	handled = 1;

	if (sts & (STS_RI | STS_RU))    /* RX interrupt or RX unavailable */
	    de4x5_rx(dev);

	if (sts & (STS_TI | STS_TU))    /* TX interrupt or TX unavailable */
	    de4x5_tx(dev);

	if (sts & STS_LNF) {            /* link fail: stop reporting it */
	    lp->irq_mask &= ~IMR_LFM;
	}

	if (sts & STS_UNF) {            /* transmit underflow */
	    de4x5_txur(dev);
	}

	if (sts & STS_SE) {             /* fatal bus error */
	    STOP_DE4X5;
	    printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
		   dev->name, sts);
	    spin_unlock(&lp->lock);
	    return IRQ_HANDLED;
	}
    }

    /* Load any packets cached while we were busy, preserving order. */
    if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
	while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
	    de4x5_queue_pkt(de4x5_get_cache(dev), dev);
	}
	lp->cache.lock = 0;
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    ENABLE_IRQs;
    spin_unlock(&lp->lock);

    return IRQ_RETVAL(handled);
}
1600
/*
** Receive path: walk the RX ring while the chip has handed descriptors
** back (OWN bit clear, i.e. status >= 0), deliver good last-segment
** frames to the stack, account errors, then return every consumed
** descriptor to the chip. Always returns 0.
*/
static int
de4x5_rx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
	                                                    entry=lp->rx_new) {
	status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

	/* DC21140 rev 2.0 workaround: handle a masked FIFO overflow. */
	if (lp->rx_ovf) {
	    if (inl(DE4X5_MFC) & MFC_FOCM) {
		de4x5_rx_ovfc(dev);
		break;
	    }
	}

	if (status & RD_FS) {           /* remember the first segment */
	    lp->rx_old = entry;
	}

	if (status & RD_LS) {           /* complete frame received */
	    if (lp->tx_enable) lp->linkOK++;
	    if (status & RD_ES) {       /* error summary: count them all */
		lp->stats.rx_errors++;
		if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
		if (status & RD_CE)           lp->stats.rx_crc_errors++;
		if (status & RD_OF)           lp->stats.rx_fifo_errors++;
		if (status & RD_TL)           lp->stats.rx_length_errors++;
		if (status & RD_RF)           lp->pktStats.rx_runt_frames++;
		if (status & RD_CS)           lp->pktStats.rx_collision++;
		if (status & RD_DB)           lp->pktStats.rx_dribble++;
		if (status & RD_OF)           lp->pktStats.rx_overflow++;
	    } else {                    /* a valid frame: pass it up */
		struct sk_buff *skb;
		/* frame length lives in the upper status bits; -4 drops
		   the trailing CRC */
		short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
					                            >> 16) - 4;

		if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
		    printk("%s: Insufficient memory; nuking packet.\n",
			   dev->name);
		    lp->stats.rx_dropped++;
		} else {
		    de4x5_dbg_rx(skb, pkt_len);

		    skb->protocol=eth_type_trans(skb,dev);
		    de4x5_local_stats(dev, skb->data, pkt_len);
		    netif_rx(skb);

		    lp->stats.rx_packets++;
		    lp->stats.rx_bytes += pkt_len;
		}
	    }

	    /* Return all segments of this frame to the chip. */
	    for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
		lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
		barrier();
	    }
	    lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
	    barrier();
	}

	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
    }

    return 0;
}
1676
1677static inline void
1678de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1679{
1680 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1681 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1682 DMA_TO_DEVICE);
1683 if ((u_long) lp->tx_skb[entry] > 1)
1684 dev_kfree_skb_irq(lp->tx_skb[entry]);
1685 lp->tx_skb[entry] = NULL;
1686}
1687
1688
1689
1690
/*
** TX interrupt service routine. Reclaim completed descriptors between
** tx_old and tx_new, updating the error statistics, then restart the
** transmit queue if ring space has become available.
*/
static int
de4x5_tx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
	status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
	if (status < 0) {                     /* Chip still owns it: stop */
	    break;
	} else if (status != 0x7fffffff) {    /* 0x7fffffff appears to mark a
						 special (setup-frame) slot
						 whose stats are skipped —
						 TODO(review) confirm */
	    if (status & TD_ES) {             /* An error occurred */
		lp->stats.tx_errors++;
		if (status & TD_NC) lp->stats.tx_carrier_errors++;
		if (status & TD_LC) lp->stats.tx_window_errors++;
		if (status & TD_UF) lp->stats.tx_fifo_errors++;
		if (status & TD_EC) lp->pktStats.excessive_collisions++;
		if (status & TD_DE) lp->stats.tx_aborted_errors++;

		if (TX_PKT_PENDING) {
		    outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
		}
	    } else {                          /* Packet sent successfully */
		lp->stats.tx_packets++;
		if (lp->tx_enable) lp->linkOK++;
	    }

	    /* Excessive collisions count as 16; otherwise the collision
	    ** count field (TD_CC) is extracted from the status word. */
	    lp->stats.collisions += ((status & TD_EC) ? 16 :
				     ((status & TD_CC) >> 3));

	    if (lp->tx_skb[entry] != NULL)
		de4x5_free_tx_buff(lp, entry);
	}

	lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
    }

    /* Wake the queue if descriptors freed up while it was stopped. */
    if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
	if (lp->interrupt)
	    netif_wake_queue(dev);
	else
	    netif_start_queue(dev);
    }

    return 0;
}
1742
1743static void
1744de4x5_ast(struct net_device *dev)
1745{
1746 struct de4x5_private *lp = netdev_priv(dev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766}
1767
/*
** Handle a transmit underrun: unless already in store-and-forward mode
** (always retried on DC21040/DC21041), stop the TX/RX processes, raise
** the transmit FIFO threshold (the OMR_TR field) by one step — or
** switch to store-and-forward once the field is saturated — and then
** restart the chip.
*/
static int
de4x5_txur(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int omr;

    omr = inl(DE4X5_OMR);
    if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
	omr &= ~(OMR_ST|OMR_SR);
	outl(omr, DE4X5_OMR);
	while (inl(DE4X5_STS) & STS_TS);      /* Busy-wait for TX to stop */
	if ((omr & OMR_TR) < OMR_TR) {
	    omr += 0x4000;                    /* One step of the OMR_TR field */
	} else {
	    omr |= OMR_SF;                    /* Threshold maxed: store & fwd */
	}
	outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
    }

    return 0;
}
1790
1791static int
1792de4x5_rx_ovfc(struct net_device *dev)
1793{
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810}
1811
/*
** Close the interface: stop the autosense timer and the transmit
** queue, mask chip interrupts, stop the DMA engines, release the IRQ
** and the RX/TX ring buffers, then put the chip into its sleep state.
*/
static int
de4x5_close(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 imr, omr;                    /* Used by the DISABLE_IRQs/STOP_DE4X5 macros */

    disable_ast(dev);

    netif_stop_queue(dev);

    if (de4x5_debug & DEBUG_CLOSE) {
	printk("%s: Shutting down ethercard, status was %8.8x.\n",
	       dev->name, inl(DE4X5_STS));
    }

    /* Mask all chip interrupts and halt the TX/RX processes. */
    DISABLE_IRQs;
    STOP_DE4X5;

    free_irq(dev->irq, dev);
    lp->state = CLOSED;

    /* Free the ring buffer resources. */
    de4x5_free_rx_buffs(dev);
    de4x5_free_tx_buffs(dev);

    /* Put the chip into its low-power state. */
    yawn(dev, SLEEP);

    return 0;
}
1847
1848static struct net_device_stats *
1849de4x5_get_stats(struct net_device *dev)
1850{
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857}
1858
1859static void
1860de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861{
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++;
1882 if (lp->pktStats.bins[0] == 0) {
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
/*
** Build the TX descriptor at lp->tx_new for 'buf' and hand it to the
** chip. des1 is first masked down to TD_TER (keeping only the
** end-of-ring bit) before the new control flags are OR'ed in. The
** TD_IC (interrupt-on-completion) bit of the *previous* descriptor is
** cleared — presumably so only the most recently queued packet raises
** a completion interrupt; TODO(review) confirm. T_OWN is written last,
** after a barrier, so the chip never sees a half-built descriptor.
** Note: the caller is responsible for advancing lp->tx_new.
*/
static void
load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1); /* previous slot */
    dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);

    lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
    lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
    lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
    lp->tx_skb[lp->tx_new] = skb;
    lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
    barrier();

    /* Transfer ownership to the chip only after the descriptor is complete. */
    lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
    barrier();
}
1912
1913
1914
1915
1916static void
1917set_multicast_list(struct net_device *dev)
1918{
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) {
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD);
1936 dev->trans_start = jiffies;
1937 }
1938 }
1939}
1940
1941
1942
1943
1944
1945
/*
** Rebuild the multicast section of the setup frame. With IFF_ALLMULTI
** or more than 14 addresses, fall back to the chip's pass-all-multicast
** mode (OMR_PM); otherwise fill in either the imperfect hash filter
** (HASH_PERF) or the perfect filter address slots.
*/
static void
SetMulticastFilter(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct netdev_hw_addr *ha;
    u_long iobase = dev->base_addr;
    int i, bit, byte;
    u16 hashcode;
    u32 omr, crc;
    char *pa;
    unsigned char *addrs;

    omr = inl(DE4X5_OMR);
    omr &= ~(OMR_PR | OMR_PM);
    pa = build_setup_frame(dev, ALL);         /* Build the base setup frame */

    if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
	omr |= OMR_PM;                        /* Pass all multicasts */
    } else if (lp->setup_f == HASH_PERF) {    /* Imperfect hash filtering */
	netdev_for_each_mc_addr(ha, dev) {
	    crc = ether_crc_le(ETH_ALEN, ha->addr);
	    hashcode = crc & HASH_BITS;       /* Low CRC bits select the filter bit */

	    byte = hashcode >> 3;             /* Byte within the hash table */
	    bit = 1 << (hashcode & 0x07);     /* Bit within that byte */

	    /* Remap the byte index into the setup frame, where only part
	    ** of each 32-bit word carries filter data (inferred from this
	    ** doubling/adjust arithmetic — TODO(review) confirm against
	    ** the chip's setup frame format). */
	    byte <<= 1;
	    if (byte & 0x02) {
		byte -= 1;
	    }
	    lp->setup_frame[byte] |= bit;
	}
    } else {                                  /* Perfect filtering */
	netdev_for_each_mc_addr(ha, dev) {
	    addrs = ha->addr;
	    /* Addresses are stored two bytes per 32-bit word in the
	    ** setup frame, hence the (i&1) offset and +4 stride. */
	    for (i=0; i<ETH_ALEN; i++) {
		*(pa + (i&1)) = *addrs++;
		if (i & 0x01) pa += 4;
	    }
	}
    }
    outl(omr, DE4X5_OMR);
}
1989
1990#ifdef CONFIG_EISA
1991
/* IRQ lines an EISA board may use, indexed by its configuration bits. */
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
/*
** Probe one EISA slot: claim its I/O regions, read the chip ID and
** revision from the EISA configuration registers, determine the IRQ
** from the board's strap bits, program the PCI-compatible config
** registers on the board, then run the common hardware init. On
** failure all acquired resources are released via the goto chain.
*/
static int __init de4x5_eisa_probe (struct device *gendev)
{
    struct eisa_device *edev;
    u_long iobase;
    u_char irq, regval;
    u_short vendor;
    u32 cfid;
    int status, device;
    struct net_device *dev;
    struct de4x5_private *lp;

    edev = to_eisa_device (gendev);
    iobase = edev->base_addr;

    if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
	return -EBUSY;

    if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
			 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
	status = -EBUSY;
	goto release_reg_1;
    }

    if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
	status = -ENOMEM;
	goto release_reg_2;
    }
    lp = netdev_priv(dev);

    /* Read the chip/vendor ID and revision through the board's
    ** PCI-style configuration ports. */
    cfid = (u32) inl(PCI_CFID);
    lp->cfrv = (u_short) inl(PCI_CFRV);
    device = (cfid >> 8) & 0x00ffff00;
    vendor = (u_short) cfid;

    /* IRQ level/type strap bits from EISA register 0. */
    regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
#ifdef CONFIG_ALPHA
    /* Alpha-specific bus interface setup (burst reads/writes etc.);
    ** the delays let the register writes settle. */
    outb (ER1_IAM | 1, EISA_REG1);
    mdelay (1);

    outb (ER1_IAM, EISA_REG1);
    mdelay (1);

    outb (ER3_BWE | ER3_BRE, EISA_REG3);

    outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
#endif
    irq = de4x5_irq[(regval >> 1) & 0x03];    /* Map strap bits to an IRQ */

    if (is_DC2114x) {
	device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
    }
    lp->chipset = device;
    lp->bus = EISA;

    /* Enable I/O and bus mastering, set the latency timer and the
    ** base I/O address in the board's config space. */
    outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
    outl(0x00006000, PCI_CFLT);
    outl(iobase, PCI_CBIO);

    DevicePresent(dev, EISA_APROM);           /* Read the MAC address */

    dev->irq = irq;

    if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
	return 0;
    }

    free_netdev (dev);
 release_reg_2:
    release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
 release_reg_1:
    release_region (iobase, DE4X5_EISA_TOTAL_SIZE);

    return status;
}
2079
2080static int de4x5_eisa_remove(struct device *device)
2081{
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094}
2095
2096static struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 },
2098 { "" }
2099};
2100MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = de4x5_eisa_remove,
2108 }
2109};
2110MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2111#endif
2112
2113#ifdef CONFIG_PCI
2114
2115
2116
2117
2118
2119
2120
/*
** Scan the other devices on this PCI bus for compatible DECchips and
** record, in the file-scope 'last' descriptor, the chipset, bus, IRQ
** and MAC address of the last one whose SROM holds a plausible
** hardware address (the byte sum is neither 0 nor six times 0xff).
** Devices without a usable IRQ or with I/O access disabled are skipped.
*/
static void
srom_search(struct net_device *dev, struct pci_dev *pdev)
{
    u_char pb;
    u_short vendor, status;
    u_int irq = 0, device;
    u_long iobase = 0;
    int i, j;
    struct de4x5_private *lp = netdev_priv(dev);
    struct pci_dev *this_dev;

    list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
	vendor = this_dev->vendor;
	device = this_dev->device << 8;
	if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;

	/* Bus number and slot of the candidate device. */
	pb = this_dev->bus->number;

	lp->device = PCI_SLOT(this_dev->devfn);
	lp->bus_num = pb;

	/* Tell DC21142 and DC21143 apart by the revision number. */
	if (is_DC2114x) {
	    device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
		      ? DC21142 : DC21143);
	}
	lp->chipset = device;

	/* Base I/O address of the candidate. */
	iobase = pci_resource_start(this_dev, 0);

	/* Skip devices without a sane IRQ assignment. */
	irq = this_dev->irq;
	if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;

	/* Skip devices that don't respond to I/O accesses. */
	pci_read_config_word(this_dev, PCI_COMMAND, &status);
	if (!(status & PCI_COMMAND_IO)) continue;

	/* Read the SROM and sanity-check the hardware address bytes. */
	DevicePresent(dev, DE4X5_APROM);
	for (j=0, i=0; i<ETH_ALEN; i++) {
	    j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
	}
	if (j != 0 && j != 6 * 0xff) {        /* Neither all-zero nor all-ff */
	    last.chipset = device;
	    last.bus = pb;
	    last.irq = irq;
	    for (i=0; i<ETH_ALEN; i++) {
		last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
	    }
	    return;
	}
    }
}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
/*
** Probe one PCI device: optionally filter on the module 'io' parameter
** (bus/slot encoded as io = bus<<8 | slot), identify the chipset,
** ensure I/O and bus-master access are enabled, bump a too-small
** latency timer, read the MAC address, claim the I/O region and run
** the common hardware init. Failures unwind via the goto chain.
*/
static int de4x5_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
    u_char pb, pbus = 0, dev_num, dnum = 0, timer;
    u_short vendor, status;
    u_int irq = 0, device;
    u_long iobase = 0;
    int error;
    struct net_device *dev;
    struct de4x5_private *lp;

    dev_num = PCI_SLOT(pdev->devfn);
    pb = pdev->bus->number;

    /* The 'io' module parameter restricts probing to one bus/slot. */
    if (io) {
	pbus = (u_short)(io >> 8);
	dnum = (u_short)(io & 0xff);
	if ((pbus != pb) || (dnum != dev_num))
	    return -ENODEV;
    }

    vendor = pdev->vendor;
    device = pdev->device << 8;
    if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
	return -ENODEV;

    if ((error = pci_enable_device (pdev)))
	return error;

    if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
	error = -ENOMEM;
	goto disable_dev;
    }

    lp = netdev_priv(dev);
    lp->bus = PCI;
    lp->bus_num = 0;

    /* Look over the rest of this bus for other DECchips (records info
    ** in the file-scope 'last' descriptor). */
    if (lp->bus_num != pb) {
	lp->bus_num = pb;
	srom_search(dev, pdev);
    }

    lp->cfrv = pdev->revision;

    lp->device = dev_num;
    lp->bus_num = pb;

    /* Tell DC21142 and DC21143 apart by the revision number. */
    if (is_DC2114x) {
	device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
    }
    lp->chipset = device;

    iobase = pci_resource_start(pdev, 0);

    /* Reject devices without a usable IRQ assignment. */
    irq = pdev->irq;
    if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
	error = -ENODEV;
	goto free_dev;
    }

    pci_read_config_word(pdev, PCI_COMMAND, &status);
#ifdef __powerpc__
    /* Some PowerPC firmware leaves I/O access disabled: try turning it
    ** on before giving up. */
    if (!(status & PCI_COMMAND_IO)) {
	status |= PCI_COMMAND_IO;
	pci_write_config_word(pdev, PCI_COMMAND, status);
	pci_read_config_word(pdev, PCI_COMMAND, &status);
    }
#endif
    if (!(status & PCI_COMMAND_IO)) {
	error = -ENODEV;
	goto free_dev;
    }

    /* The chip needs bus mastering for its DMA engines. */
    if (!(status & PCI_COMMAND_MASTER)) {
	status |= PCI_COMMAND_MASTER;
	pci_write_config_word(pdev, PCI_COMMAND, status);
	pci_read_config_word(pdev, PCI_COMMAND, &status);
    }
    if (!(status & PCI_COMMAND_MASTER)) {
	error = -ENODEV;
	goto free_dev;
    }

    /* Ensure a reasonable latency timer for bursts. */
    pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
    if (timer < 0x60) {
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
    }

    DevicePresent(dev, DE4X5_APROM);          /* Read the MAC address */

    if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
	error = -EBUSY;
	goto free_dev;
    }

    dev->irq = irq;

    if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
	goto release;
    }

    return 0;

 release:
    release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
 free_dev:
    free_netdev (dev);
 disable_dev:
    pci_disable_device (pdev);
    return error;
}
2316
2317static void de4x5_pci_remove(struct pci_dev *pdev)
2318{
2319 struct net_device *dev;
2320 u_long iobase;
2321
2322 dev = pci_get_drvdata(pdev);
2323 iobase = dev->base_addr;
2324
2325 unregister_netdev (dev);
2326 free_netdev (dev);
2327 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2328 pci_disable_device (pdev);
2329}
2330
/* PCI IDs claimed by this driver: DC21040 (Tulip), DC21041 (Tulip
** Plus), DC21140 (Fast Tulip) and DC21142/21143 (told apart at probe
** time by revision). */
static const struct pci_device_id de4x5_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
    { },
};

static struct pci_driver de4x5_pci_driver = {
    .name     = "de4x5",
    .id_table = de4x5_pci_tbl,
    .probe    = de4x5_pci_probe,
    .remove   = de4x5_pci_remove,
};
2349
2350#endif
2351
2352
2353
2354
2355
2356
2357
2358
/*
** Kick off media autosensing: reset the media state machine to INIT
** and run its first step via de4x5_ast() (which also re-arms the
** autosense timer). The MFC read's result is discarded — presumably
** the access clears/latches the missed-frame counter; TODO(review)
** confirm against the chip documentation.
*/
static int
autoconf_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    disable_ast(dev);

    lp->c_media = AUTO;
    inl(DE4X5_MFC);
    lp->media = INIT;
    lp->tcount = 0;

    de4x5_ast(dev);

    return lp->media;
}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
/*
** DC21040 media autosense state machine. From INIT the starting medium
** is picked from lp->autosense; each medium state programs the SIA via
** dc21040_state() with chip-specific CSR13/14/15 values and either
** locks onto a working medium or falls through TP -> BNC/AUI ->
** EXT_SIA -> NC (no connection). The *_SUSPECT states re-verify a
** previously working medium before abandoning it. Returns the delay in
** ms until the next timer-driven invocation.
*/
static int
dc21040_autoconf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int next_tick = DE4X5_AUTOSENSE_MS;
    s32 imr;                          /* Used by the DISABLE_IRQs macro */

    switch (lp->media) {
    case INIT:
	DISABLE_IRQs;
	lp->tx_enable = false;
	lp->timeout = -1;
	de4x5_save_skbs(dev);         /* Park queued skbs during the probe */
	if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
	    lp->media = TP;
	} else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
	    lp->media = BNC_AUI;
	} else if (lp->autosense == EXT_SIA) {
	    lp->media = EXT_SIA;
	} else {
	    lp->media = NC;
	}
	lp->local_state = 0;
	next_tick = dc21040_autoconf(dev);
	break;

    case TP:
	next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
				  TP_SUSPECT, test_tp);
	break;

    case TP_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
	break;

    case BNC:
    case AUI:
    case BNC_AUI:
	next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
				  BNC_AUI_SUSPECT, ping_media);
	break;

    case BNC_AUI_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
	break;

    case EXT_SIA:
	next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
				  NC, EXT_SIA_SUSPECT, ping_media);
	break;

    case EXT_SIA_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
	break;

    case NC:
	/* Nothing found: reset the SIA to the TP defaults and restart
	** the state machine from INIT on the next tick. */
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tx_enable = false;
	break;
    }

    return next_tick;
}
2459
/*
** One step of a DC21040 medium test. local_state 0 programs the SIA
** with the given CSR values and waits 500ms; local_state 1 runs the
** supplied link test 'fn'. A negative result from 'fn' encodes a
** shorter retry delay (TIMER_CB stripped); a bad link under AUTO moves
** on to 'next_state', a good one brings the connection up. Once
** transmitting, a lost link under AUTO drops into 'suspect_state'.
*/
static int
dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
	      int next_state, int suspect_state,
	      int (*fn)(struct net_device *, int))
{
    struct de4x5_private *lp = netdev_priv(dev);
    int next_tick = DE4X5_AUTOSENSE_MS;
    int linkBad;

    switch (lp->local_state) {
    case 0:
	reset_init_sia(dev, csr13, csr14, csr15);
	lp->local_state++;
	next_tick = 500;
	break;

    case 1:
	if (!lp->tx_enable) {
	    linkBad = fn(dev, timeout);
	    if (linkBad < 0) {
		next_tick = linkBad & ~TIMER_CB;  /* Encoded retry delay */
	    } else {
		if (linkBad && (lp->autosense == AUTO)) {
		    lp->local_state = 0;
		    lp->media = next_state;       /* Try the next medium */
		} else {
		    de4x5_init_connection(dev);   /* Link OK: bring it up */
		}
	    }
	} else if (!lp->linkOK && (lp->autosense == AUTO)) {
	    lp->media = suspect_state;            /* Re-verify the link */
	    next_tick = 3000;
	}
	break;
    }

    return next_tick;
}
2498
/*
** Re-verify a medium that appeared to stop working. local_state 1:
** if traffic has since confirmed the link (linkOK), go back to
** 'prev_state'; otherwise advance and re-run the autosense step
** 'asfn'. local_state 2: run the link test 'fn' — a good link returns
** to 'prev_state', a bad one restarts the whole machine from INIT and
** bumps the try counter.
*/
static int
de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
		    int (*fn)(struct net_device *, int),
		    int (*asfn)(struct net_device *))
{
    struct de4x5_private *lp = netdev_priv(dev);
    int next_tick = DE4X5_AUTOSENSE_MS;
    int linkBad;

    switch (lp->local_state) {
    case 1:
	if (lp->linkOK) {
	    lp->media = prev_state;
	} else {
	    lp->local_state++;
	    next_tick = asfn(dev);
	}
	break;

    case 2:
	linkBad = fn(dev, timeout);
	if (linkBad < 0) {
	    next_tick = linkBad & ~TIMER_CB;      /* Encoded retry delay */
	} else if (!linkBad) {
	    lp->local_state--;
	    lp->media = prev_state;               /* Link is fine after all */
	} else {
	    lp->media = INIT;                     /* Give up on this medium */
	    lp->tcount++;
	}
    }

    return next_tick;
}
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543static int
2544dc21041_autoconf(struct net_device *dev)
2545{
2546 struct de4x5_private *lp = netdev_priv(dev);
2547 u_long iobase = dev->base_addr;
2548 s32 sts, irqs, irq_mask, imr, omr;
2549 int next_tick = DE4X5_AUTOSENSE_MS;
2550
2551 switch (lp->media) {
2552 case INIT:
2553 DISABLE_IRQs;
2554 lp->tx_enable = false;
2555 lp->timeout = -1;
2556 de4x5_save_skbs(dev);
2557 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2558 lp->media = TP;
2559 } else if (lp->autosense == TP) {
2560 lp->media = TP;
2561 } else if (lp->autosense == BNC) {
2562 lp->media = BNC;
2563 } else if (lp->autosense == AUI) {
2564 lp->media = AUI;
2565 } else {
2566 lp->media = NC;
2567 }
2568 lp->local_state = 0;
2569 next_tick = dc21041_autoconf(dev);
2570 break;
2571
2572 case TP_NW:
2573 if (lp->timeout < 0) {
2574 omr = inl(DE4X5_OMR);
2575 outl(omr | OMR_FDX, DE4X5_OMR);
2576 }
2577 irqs = STS_LNF | STS_LNP;
2578 irq_mask = IMR_LFM | IMR_LPM;
2579 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2580 if (sts < 0) {
2581 next_tick = sts & ~TIMER_CB;
2582 } else {
2583 if (sts & STS_LNP) {
2584 lp->media = ANS;
2585 } else {
2586 lp->media = AUI;
2587 }
2588 next_tick = dc21041_autoconf(dev);
2589 }
2590 break;
2591
2592 case ANS:
2593 if (!lp->tx_enable) {
2594 irqs = STS_LNP;
2595 irq_mask = IMR_LPM;
2596 sts = test_ans(dev, irqs, irq_mask, 3000);
2597 if (sts < 0) {
2598 next_tick = sts & ~TIMER_CB;
2599 } else {
2600 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2601 lp->media = TP;
2602 next_tick = dc21041_autoconf(dev);
2603 } else {
2604 lp->local_state = 1;
2605 de4x5_init_connection(dev);
2606 }
2607 }
2608 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2609 lp->media = ANS_SUSPECT;
2610 next_tick = 3000;
2611 }
2612 break;
2613
2614 case ANS_SUSPECT:
2615 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2616 break;
2617
2618 case TP:
2619 if (!lp->tx_enable) {
2620 if (lp->timeout < 0) {
2621 omr = inl(DE4X5_OMR);
2622 outl(omr & ~OMR_FDX, DE4X5_OMR);
2623 }
2624 irqs = STS_LNF | STS_LNP;
2625 irq_mask = IMR_LFM | IMR_LPM;
2626 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2627 if (sts < 0) {
2628 next_tick = sts & ~TIMER_CB;
2629 } else {
2630 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2631 if (inl(DE4X5_SISR) & SISR_NRA) {
2632 lp->media = AUI;
2633 } else {
2634 lp->media = BNC;
2635 }
2636 next_tick = dc21041_autoconf(dev);
2637 } else {
2638 lp->local_state = 1;
2639 de4x5_init_connection(dev);
2640 }
2641 }
2642 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2643 lp->media = TP_SUSPECT;
2644 next_tick = 3000;
2645 }
2646 break;
2647
2648 case TP_SUSPECT:
2649 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2650 break;
2651
2652 case AUI:
2653 if (!lp->tx_enable) {
2654 if (lp->timeout < 0) {
2655 omr = inl(DE4X5_OMR);
2656 outl(omr & ~OMR_FDX, DE4X5_OMR);
2657 }
2658 irqs = 0;
2659 irq_mask = 0;
2660 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2661 if (sts < 0) {
2662 next_tick = sts & ~TIMER_CB;
2663 } else {
2664 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2665 lp->media = BNC;
2666 next_tick = dc21041_autoconf(dev);
2667 } else {
2668 lp->local_state = 1;
2669 de4x5_init_connection(dev);
2670 }
2671 }
2672 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2673 lp->media = AUI_SUSPECT;
2674 next_tick = 3000;
2675 }
2676 break;
2677
2678 case AUI_SUSPECT:
2679 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2680 break;
2681
2682 case BNC:
2683 switch (lp->local_state) {
2684 case 0:
2685 if (lp->timeout < 0) {
2686 omr = inl(DE4X5_OMR);
2687 outl(omr & ~OMR_FDX, DE4X5_OMR);
2688 }
2689 irqs = 0;
2690 irq_mask = 0;
2691 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2692 if (sts < 0) {
2693 next_tick = sts & ~TIMER_CB;
2694 } else {
2695 lp->local_state++;
2696 next_tick = dc21041_autoconf(dev);
2697 }
2698 break;
2699
2700 case 1:
2701 if (!lp->tx_enable) {
2702 if ((sts = ping_media(dev, 3000)) < 0) {
2703 next_tick = sts & ~TIMER_CB;
2704 } else {
2705 if (sts) {
2706 lp->local_state = 0;
2707 lp->media = NC;
2708 } else {
2709 de4x5_init_connection(dev);
2710 }
2711 }
2712 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2713 lp->media = BNC_SUSPECT;
2714 next_tick = 3000;
2715 }
2716 break;
2717 }
2718 break;
2719
2720 case BNC_SUSPECT:
2721 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2722 break;
2723
2724 case NC:
2725 omr = inl(DE4X5_OMR);
2726 outl(omr | OMR_FDX, DE4X5_OMR);
2727 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2728 if (lp->media != lp->c_media) {
2729 de4x5_dbg_media(dev);
2730 lp->c_media = lp->media;
2731 }
2732 lp->media = INIT;
2733 lp->tx_enable = false;
2734 break;
2735 }
2736
2737 return next_tick;
2738}
2739
2740
2741
2742
2743
2744
/*
** DC21140 (MII PHY) media autosense state machine. From INIT the
** medium is taken from the SROM info block if present, otherwise from
** lp->autosense — AUTO tries MII autonegotiation (ANS) when the PHY is
** capable, else falls back to raw speed detection (SPD_DET). ANS runs
** a restart-negotiation / wait-complete two-step and decodes the
** common capabilities from ANLPA/ANA. Established links are watched
** and dropped back to INIT when they fail. Returns the delay in ms
** until the next timer-driven invocation.
*/
static int
dc21140m_autoconf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int ana, anlpa, cap, cr, slnk, sr;
    int next_tick = DE4X5_AUTOSENSE_MS;
    u_long imr, omr, iobase = dev->base_addr;

    switch(lp->media) {
    case INIT:
	if (lp->timeout < 0) {
	    DISABLE_IRQs;
	    lp->tx_enable = false;
	    lp->linkOK = 0;
	    de4x5_save_skbs(dev);     /* Park queued skbs during the probe */
	}
	if ((next_tick = de4x5_reset_phy(dev)) < 0) {
	    next_tick &= ~TIMER_CB;   /* Encoded retry delay */
	} else {
	    if (lp->useSROM) {
		/* Medium comes from the SROM info block. */
		if (srom_map_media(dev) < 0) {
		    lp->tcount++;
		    return next_tick;
		}
		srom_exec(dev, lp->phy[lp->active].gep);
		if (lp->infoblock_media == ANS) {
		    ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
		    mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		}
	    } else {
		lp->tmp = MII_SR_ASSC;/* Devious: see SPD_DET/link checks */
		SET_10Mb;
		if (lp->autosense == _100Mb) {
		    lp->media = _100Mb;
		} else if (lp->autosense == _10Mb) {
		    lp->media = _10Mb;
		} else if ((lp->autosense == AUTO) &&
			   ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
		    /* PHY can autonegotiate: advertise our abilities. */
		    ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
		    ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
		    mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		    lp->media = ANS;
		} else if (lp->autosense == AUTO) {
		    lp->media = SPD_DET;
		} else if (is_spd_100(dev) && is_100_up(dev)) {
		    lp->media = _100Mb;
		} else {
		    lp->media = NC;
		}
	    }
	    lp->local_state = 0;
	    next_tick = dc21140m_autoconf(dev);
	}
	break;

    case ANS:
	switch (lp->local_state) {
	case 0:
	    /* Step 1: restart negotiation, wait for the restart bit to
	    ** self-clear. */
	    if (lp->timeout < 0) {
		mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
	    }
	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
	    if (cr < 0) {
		next_tick = cr & ~TIMER_CB;
	    } else {
		if (cr) {
		    lp->local_state = 0;
		    lp->media = SPD_DET;          /* Restart never cleared */
		} else {
		    lp->local_state++;
		}
		next_tick = dc21140m_autoconf(dev);
	    }
	    break;

	case 1:
	    /* Step 2: wait for negotiation complete, then decode the
	    ** highest common ability from ANLPA & ANA. */
	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
		next_tick = sr & ~TIMER_CB;
	    } else {
		lp->media = SPD_DET;
		lp->local_state = 0;
		if (sr) {                         /* Success! */
		    lp->tmp = MII_SR_ASSC;
		    anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
		    ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		    if (!(anlpa & MII_ANLPA_RF) &&
			(cap = anlpa & MII_ANLPA_TAF & ana)) {
			if (cap & MII_ANA_100M) {
			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
			    lp->media = _100Mb;
			} else if (cap & MII_ANA_10M) {
			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;

			    lp->media = _10Mb;
			}
		    }
		}                                 /* Still SPD_DET on failure */
		next_tick = dc21140m_autoconf(dev);
	    }
	    break;
	}
	break;

    case SPD_DET:
	/* Raw speed detection: probe for a 100Mb/s link, else check for
	** a 10Mb/s one using the saved link criterion in lp->tmp. */
	if (lp->timeout < 0) {
	    lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
		       (~gep_rd(dev) & GEP_LNP));
	    SET_100Mb_PDET;
	}
	if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
	    next_tick = slnk & ~TIMER_CB;
	} else {
	    if (is_spd_100(dev) && is_100_up(dev)) {
		lp->media = _100Mb;
	    } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
		lp->media = _10Mb;
	    } else {
		lp->media = NC;
	    }
	    next_tick = dc21140m_autoconf(dev);
	}
	break;

    case _100Mb:
	next_tick = 3000;
	if (!lp->tx_enable) {
	    SET_100Mb;
	    de4x5_init_connection(dev);
	} else {
	    /* Watch an established 100Mb link; drop to INIT on failure. */
	    if (!lp->linkOK && (lp->autosense == AUTO)) {
		if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
		    lp->media = INIT;
		    lp->tcount++;
		    next_tick = DE4X5_AUTOSENSE_MS;
		}
	    }
	}
	break;

    case BNC:
    case AUI:
    case _10Mb:
	next_tick = 3000;
	if (!lp->tx_enable) {
	    SET_10Mb;
	    de4x5_init_connection(dev);
	} else {
	    /* Watch an established 10Mb link; drop to INIT on failure. */
	    if (!lp->linkOK && (lp->autosense == AUTO)) {
		if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
		    lp->media = INIT;
		    lp->tcount++;
		    next_tick = DE4X5_AUTOSENSE_MS;
		}
	    }
	}
	break;

    case NC:
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tx_enable = false;
	break;
    }

    return next_tick;
}
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929static int
2930dc2114x_autoconf(struct net_device *dev)
2931{
2932 struct de4x5_private *lp = netdev_priv(dev);
2933 u_long iobase = dev->base_addr;
2934 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2935 int next_tick = DE4X5_AUTOSENSE_MS;
2936
2937 switch (lp->media) {
2938 case INIT:
2939 if (lp->timeout < 0) {
2940 DISABLE_IRQs;
2941 lp->tx_enable = false;
2942 lp->linkOK = 0;
2943 lp->timeout = -1;
2944 de4x5_save_skbs(dev);
2945 if (lp->params.autosense & ~AUTO) {
2946 srom_map_media(dev);
2947 if (lp->media != lp->params.autosense) {
2948 lp->tcount++;
2949 lp->media = INIT;
2950 return next_tick;
2951 }
2952 lp->media = INIT;
2953 }
2954 }
2955 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2956 next_tick &= ~TIMER_CB;
2957 } else {
2958 if (lp->autosense == _100Mb) {
2959 lp->media = _100Mb;
2960 } else if (lp->autosense == _10Mb) {
2961 lp->media = _10Mb;
2962 } else if (lp->autosense == TP) {
2963 lp->media = TP;
2964 } else if (lp->autosense == BNC) {
2965 lp->media = BNC;
2966 } else if (lp->autosense == AUI) {
2967 lp->media = AUI;
2968 } else {
2969 lp->media = SPD_DET;
2970 if ((lp->infoblock_media == ANS) &&
2971 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2972 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2973 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2974 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2975 lp->media = ANS;
2976 }
2977 }
2978 lp->local_state = 0;
2979 next_tick = dc2114x_autoconf(dev);
2980 }
2981 break;
2982
2983 case ANS:
2984 switch (lp->local_state) {
2985 case 0:
2986 if (lp->timeout < 0) {
2987 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2988 }
2989 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2990 if (cr < 0) {
2991 next_tick = cr & ~TIMER_CB;
2992 } else {
2993 if (cr) {
2994 lp->local_state = 0;
2995 lp->media = SPD_DET;
2996 } else {
2997 lp->local_state++;
2998 }
2999 next_tick = dc2114x_autoconf(dev);
3000 }
3001 break;
3002
3003 case 1:
3004 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3005 if (sr < 0) {
3006 next_tick = sr & ~TIMER_CB;
3007 } else {
3008 lp->media = SPD_DET;
3009 lp->local_state = 0;
3010 if (sr) {
3011 lp->tmp = MII_SR_ASSC;
3012 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3013 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3014 if (!(anlpa & MII_ANLPA_RF) &&
3015 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3016 if (cap & MII_ANA_100M) {
3017 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3018 lp->media = _100Mb;
3019 } else if (cap & MII_ANA_10M) {
3020 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3021 lp->media = _10Mb;
3022 }
3023 }
3024 }
3025 next_tick = dc2114x_autoconf(dev);
3026 }
3027 break;
3028 }
3029 break;
3030
3031 case AUI:
3032 if (!lp->tx_enable) {
3033 if (lp->timeout < 0) {
3034 omr = inl(DE4X5_OMR);
3035 outl(omr & ~OMR_FDX, DE4X5_OMR);
3036 }
3037 irqs = 0;
3038 irq_mask = 0;
3039 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3040 if (sts < 0) {
3041 next_tick = sts & ~TIMER_CB;
3042 } else {
3043 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3044 lp->media = BNC;
3045 next_tick = dc2114x_autoconf(dev);
3046 } else {
3047 lp->local_state = 1;
3048 de4x5_init_connection(dev);
3049 }
3050 }
3051 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3052 lp->media = AUI_SUSPECT;
3053 next_tick = 3000;
3054 }
3055 break;
3056
3057 case AUI_SUSPECT:
3058 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3059 break;
3060
3061 case BNC:
3062 switch (lp->local_state) {
3063 case 0:
3064 if (lp->timeout < 0) {
3065 omr = inl(DE4X5_OMR);
3066 outl(omr & ~OMR_FDX, DE4X5_OMR);
3067 }
3068 irqs = 0;
3069 irq_mask = 0;
3070 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3071 if (sts < 0) {
3072 next_tick = sts & ~TIMER_CB;
3073 } else {
3074 lp->local_state++;
3075 next_tick = dc2114x_autoconf(dev);
3076 }
3077 break;
3078
3079 case 1:
3080 if (!lp->tx_enable) {
3081 if ((sts = ping_media(dev, 3000)) < 0) {
3082 next_tick = sts & ~TIMER_CB;
3083 } else {
3084 if (sts) {
3085 lp->local_state = 0;
3086 lp->tcount++;
3087 lp->media = INIT;
3088 } else {
3089 de4x5_init_connection(dev);
3090 }
3091 }
3092 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3093 lp->media = BNC_SUSPECT;
3094 next_tick = 3000;
3095 }
3096 break;
3097 }
3098 break;
3099
3100 case BNC_SUSPECT:
3101 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3102 break;
3103
3104 case SPD_DET:
3105 if (srom_map_media(dev) < 0) {
3106 lp->tcount++;
3107 lp->media = INIT;
3108 return next_tick;
3109 }
3110 if (lp->media == _100Mb) {
3111 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3112 lp->media = SPD_DET;
3113 return slnk & ~TIMER_CB;
3114 }
3115 } else {
3116 if (wait_for_link(dev) < 0) {
3117 lp->media = SPD_DET;
3118 return PDET_LINK_WAIT;
3119 }
3120 }
3121 if (lp->media == ANS) {
3122 if (is_spd_100(dev)) {
3123 lp->media = _100Mb;
3124 } else {
3125 lp->media = _10Mb;
3126 }
3127 next_tick = dc2114x_autoconf(dev);
3128 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3129 (((lp->media == _10Mb) || (lp->media == TP) ||
3130 (lp->media == BNC) || (lp->media == AUI)) &&
3131 is_10_up(dev))) {
3132 next_tick = dc2114x_autoconf(dev);
3133 } else {
3134 lp->tcount++;
3135 lp->media = INIT;
3136 }
3137 break;
3138
3139 case _10Mb:
3140 next_tick = 3000;
3141 if (!lp->tx_enable) {
3142 SET_10Mb;
3143 de4x5_init_connection(dev);
3144 } else {
3145 if (!lp->linkOK && (lp->autosense == AUTO)) {
3146 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3147 lp->media = INIT;
3148 lp->tcount++;
3149 next_tick = DE4X5_AUTOSENSE_MS;
3150 }
3151 }
3152 }
3153 break;
3154
3155 case _100Mb:
3156 next_tick = 3000;
3157 if (!lp->tx_enable) {
3158 SET_100Mb;
3159 de4x5_init_connection(dev);
3160 } else {
3161 if (!lp->linkOK && (lp->autosense == AUTO)) {
3162 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3163 lp->media = INIT;
3164 lp->tcount++;
3165 next_tick = DE4X5_AUTOSENSE_MS;
3166 }
3167 }
3168 }
3169 break;
3170
3171 default:
3172 lp->tcount++;
3173printk("Huh?: media:%02x\n", lp->media);
3174 lp->media = INIT;
3175 break;
3176 }
3177
3178 return next_tick;
3179}
3180
3181static int
3182srom_autoconf(struct net_device *dev)
3183{
3184 struct de4x5_private *lp = netdev_priv(dev);
3185
3186 return lp->infoleaf_fn(dev);
3187}
3188
3189
3190
3191
3192
3193
/*
** Map the SROM infoblock media code (lp->infoblock_media) into the
** driver's internal media type (lp->media) and full duplex flag
** (lp->fdx).  Returns 0 on success, or -1 when the user's duplex
** request conflicts with the media code or the code is unrecognised.
*/
static int
srom_map_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    lp->fdx = false;
    if (lp->infoblock_media == lp->media)
        return 0;

    switch(lp->infoblock_media) {
    case SROM_10BASETF:
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        /* fall through - FDX variant shares the 10BASE-T mapping */
    case SROM_10BASET:
        if (lp->params.fdx && !lp->fdx) return -1;
        if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
            lp->media = _10Mb;
        } else {
            lp->media = TP;
        }
        break;

    case SROM_10BASE2:
        lp->media = BNC;
        break;

    case SROM_10BASE5:
        lp->media = AUI;
        break;

    case SROM_100BASETF:
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        /* fall through - FDX variant shares the 100BASE-T mapping */
    case SROM_100BASET:
        if (lp->params.fdx && !lp->fdx) return -1;
        lp->media = _100Mb;
        break;

    case SROM_100BASET4:
        lp->media = _100Mb;
        break;

    case SROM_100BASEFF:
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        /* fall through - FDX variant shares the 100BASE-F mapping */
    case SROM_100BASEF:
        if (lp->params.fdx && !lp->fdx) return -1;
        lp->media = _100Mb;
        break;

    case ANS:
        lp->media = ANS;
        lp->fdx = lp->params.fdx;
        break;

    default:
        printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
               lp->infoblock_media);
        return -1;
    }

    return 0;
}
3257
/*
** Bring the newly selected medium into service: reset the descriptor
** rings, re-enable interrupts, allow transmits and restart the TX
** process.  Ring/IRQ manipulation is done under lp->lock.
*/
static void
de4x5_init_connection(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_long flags = 0;

    if (lp->media != lp->c_media) {
        de4x5_dbg_media(dev);              /* report the media change once */
        lp->c_media = lp->media;
    }

    spin_lock_irqsave(&lp->lock, flags);
    de4x5_rst_desc_ring(dev);
    de4x5_setup_intr(dev);
    lp->tx_enable = true;
    spin_unlock_irqrestore(&lp->lock, flags);
    outl(POLL_DEMAND, DE4X5_TPD);          /* prod the transmit engine */

    netif_wake_queue(dev);
}
3279
3280
3281
3282
3283
3284
/*
** Reset the PHY using whichever mechanism applies: the SROM reset
** sequence, a MII soft reset or the GEP hard reset.  Returns the poll
** interval needed while waiting for a MII reset to complete, or 0 when
** no wait is required.
*/
static int
de4x5_reset_phy(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int next_tick = 0;

    if ((lp->useSROM) || (lp->phy[lp->active].id)) {
        if (lp->timeout < 0) {
            if (lp->useSROM) {
                if (lp->phy[lp->active].rst) {
                    /* Run the sequence twice - NOTE(review): appears
                    ** deliberate (some PHYs need a second pass); confirm
                    ** before changing. */
                    srom_exec(dev, lp->phy[lp->active].rst);
                    srom_exec(dev, lp->phy[lp->active].rst);
                } else if (lp->rst) {
                    /* Board-level reset sequence, again twice */
                    srom_exec(dev, lp->rst);
                    srom_exec(dev, lp->rst);
                }
            } else {
                PHY_HARD_RESET;
            }
            if (lp->useMII) {
                mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
            }
        }
        if (lp->useMII) {
            /* Poll (up to 500ms) for the MII reset bit to clear */
            next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
        }
    } else if (lp->chipset == DC21140) {
        PHY_HARD_RESET;
    }

    return next_tick;
}
3318
/*
** Wait up to 'msec' ms for one of the interrupt status bits in 'irqs'
** to assert, programming the SIA (CSR13-15) and interrupt mask on the
** first pass.  Returns the status register contents, or 100|TIMER_CB
** asking the caller to re-poll via the timer in 100ms.
*/
static int
test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 sts, csr12;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;
        if (!lp->useSROM) {
            /* Bring up the SIA with the requested CSR values */
            reset_init_sia(dev, csr13, csr14, csr15);
        }

        /* Set up the interrupt mask */
        outl(irq_mask, DE4X5_IMR);

        /* Clear any pending interrupts (write-1-to-clear) */
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);

        /* Clear the SIA status register where one exists */
        if ((lp->chipset == DC21041) || lp->useSROM) {
            csr12 = inl(DE4X5_SISR);
            outl(csr12, DE4X5_SISR);
        }
    }

    sts = inl(DE4X5_STS) & ~TIMER_CB;

    if (!(sts & irqs) && --lp->timeout) {
        sts = 100 | TIMER_CB;              /* not yet - retry in 100ms */
    } else {
        lp->timeout = -1;                  /* done or timed out */
    }

    return sts;
}
3356
/*
** Sample the twisted-pair link for up to 'msec' ms.  Returns 0 once
** neither link-fail nor no-carrier is reported, 100|TIMER_CB to retry,
** or the offending SISR bits on timeout.
*/
static int
test_tp(struct net_device *dev, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int sisr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;
    }

    /* Keep only the link fail / no carrier indications */
    sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);

    if (sisr && --lp->timeout) {
        sisr = 100 | TIMER_CB;             /* still failing - re-poll */
    } else {
        lp->timeout = -1;
    }

    return sisr;
}
3378
3379
3380
3381
3382
3383
3384#define SAMPLE_INTERVAL 500
3385#define SAMPLE_DELAY 2000
/*
** Sample for a 100Mb/s link every SAMPLE_INTERVAL ms for up to 'msec'
** ms, optionally delaying SAMPLE_DELAY ms first to let the link settle.
** Returns a non-zero link indication, 0 on failure, or a TIMER_CB
** tagged delay asking the caller to resample later.
*/
static int
test_for_100Mb(struct net_device *dev, int msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);

    if (lp->timeout < 0) {
        if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
        if (msec > SAMPLE_DELAY) {
            lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
            gep = SAMPLE_DELAY | TIMER_CB; /* settle first, then sample */
            return gep;
        } else {
            lp->timeout = msec/SAMPLE_INTERVAL;
        }
    }

    if (lp->phy[lp->active].id || lp->useSROM) {
        gep = is_100_up(dev) | is_spd_100(dev);
    } else {
        /* No MII/SROM: read link state off the general purpose port */
        gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
    }
    if (!(gep & ret) && --lp->timeout) {
        gep = SAMPLE_INTERVAL | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return gep;
}
3416
3417static int
3418wait_for_link(struct net_device *dev)
3419{
3420 struct de4x5_private *lp = netdev_priv(dev);
3421
3422 if (lp->timeout < 0) {
3423 lp->timeout = 1;
3424 }
3425
3426 if (lp->timeout--) {
3427 return TIMER_CB;
3428 } else {
3429 lp->timeout = -1;
3430 }
3431
3432 return 0;
3433}
3434
3435
3436
3437
3438
/*
** Poll MII register 'reg' for up to 'msec' ms until the bits selected
** by 'mask' reach the requested polarity ('pol' true waits for the bits
** to assert, false for them to clear).  Returns the masked register
** value on completion or 100|TIMER_CB to re-poll.
*/
static int
test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int test;
    u_long iobase = dev->base_addr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;
    }

    reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
    /* XOR against all-ones inverts the sense when waiting for assert */
    test = (reg ^ (pol ? ~0 : 0)) & mask;

    if (test && --lp->timeout) {
        reg = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return reg;
}
3461
/*
** Report non-zero when the current link speed is 100Mb/s, using the MII
** PHY speed register, the general purpose port, or the SROM-described
** activity bit / SISR, depending on configuration.
*/
static int
is_spd_100(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int spd;

    if (lp->useMII) {
        spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
        /* Match against the PHY-specific speed indication value */
        spd = ~(spd ^ lp->phy[lp->active].spd.value);
        spd &= lp->phy[lp->active].spd.mask;
    } else if (!lp->useSROM) {                  /* de500-xa */
        spd = ((~gep_rd(dev)) & GEP_SLNK);
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;

        spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
              (lp->linkOK & ~lp->asBitValid);
    }

    return spd;
}
3485
/*
** Report non-zero when a 100Mb/s link is up (MII link status, general
** purpose port, or the SROM-described bit / SISR_LS100).
*/
static int
is_100_up(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->useMII) {
        /* Double read for sticky bits & temporary drops */
        mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
        return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
    } else if (!lp->useSROM) {                  /* de500-xa */
        return (~gep_rd(dev)) & GEP_SLNK;
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;

        return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
               (lp->linkOK & ~lp->asBitValid);
    }
}
3506
/*
** Report non-zero when a 10Mb/s link is up (MII link status, general
** purpose port, or the SROM-described bit / SISR_LS10).
*/
static int
is_10_up(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->useMII) {
        /* Double read for sticky bits & temporary drops */
        mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
        return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
    } else if (!lp->useSROM) {                  /* de500-xa */
        return (~gep_rd(dev)) & GEP_LNP;
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return ((lp->chipset & ~0x00ff) == DC2114x) ?
                (~inl(DE4X5_SISR)&SISR_LS10):
                0;

        return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
               (lp->linkOK & ~lp->asBitValid);
    }
}
3529
3530static int
3531is_anc_capable(struct net_device *dev)
3532{
3533 struct de4x5_private *lp = netdev_priv(dev);
3534 u_long iobase = dev->base_addr;
3535
3536 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3537 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3538 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3539 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3540 } else {
3541 return 0;
3542 }
3543}
3544
3545
3546
3547
3548
/*
** Check the (BNC/AUI) network connection by transmitting a loopback
** test frame and watching for completion without no-carrier or TX
** errors.  Returns 0 when the frame went out cleanly, 1 on failure, or
** 100|TIMER_CB to re-poll while the descriptor is still owned by the
** chip.
*/
static int
ping_media(struct net_device *dev, int msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int sisr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;

        lp->tmp = lp->tx_new;          /* remember the test frame's slot */
        load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);  /* kick the transmitter */
    }

    sisr = inl(DE4X5_SISR);

    /* Descriptor still owned by the chip (status < 0 == T_OWN set)? */
    if ((!(sisr & SISR_NCR)) &&
        ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
        (--lp->timeout)) {
        sisr = 100 | TIMER_CB;
    } else {
        if ((!(sisr & SISR_NCR)) &&
            !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
            lp->timeout) {
            sisr = 0;                  /* clean transmit */
        } else {
            sisr = 1;                  /* carrier lost, TX error, timeout */
        }
        lp->timeout = -1;
    }

    return sisr;
}
3584
3585
3586
3587
3588
3589
/*
** Provide a receive buffer for ring slot 'index'.  On platforms that
** can DMA directly into skbs, a fresh skb is swapped into the ring and
** the filled one (length 'len') is returned; otherwise the packet is
** copied out of the contiguous DMA buffer area into a new skb.
** Returns NULL on allocation failure, or (sk_buff *)1 as a "bounce"
** sentinel when the device is not open (copy path only).
*/
static struct sk_buff *
de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct sk_buff *p;

#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
    struct sk_buff *ret;
    u_long i=0, tmp;

    p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
    if (!p) return NULL;

    /* Align the buffer start on a DE4X5_ALIGN boundary for DMA */
    tmp = virt_to_bus(p->data);
    i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
    skb_reserve(p, i);
    lp->rx_ring[index].buf = cpu_to_le32(tmp + i);

    /* Swap the new skb in, hand the filled one back */
    ret = lp->rx_skb[index];
    lp->rx_skb[index] = p;

    if ((u_long) ret > 1) {            /* > 1 means a real skb, not a sentinel */
        skb_put(ret, len);
    }

    return ret;

#else
    if (lp->state != OPEN) return (struct sk_buff *)1; /* not open: bounce */

    p = netdev_alloc_skb(dev, len + 2);
    if (!p) return NULL;

    skb_reserve(p, 2);                 /* align IP header on 16B boundary */
    if (index < lp->rx_old) {          /* packet wraps the ring buffer area */
        short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
        memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
        memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
    } else {
        memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
    }

    return p;
#endif
}
3635
3636static void
3637de4x5_free_rx_buffs(struct net_device *dev)
3638{
3639 struct de4x5_private *lp = netdev_priv(dev);
3640 int i;
3641
3642 for (i=0; i<lp->rxRingSize; i++) {
3643 if ((u_long) lp->rx_skb[i] > 1) {
3644 dev_kfree_skb(lp->rx_skb[i]);
3645 }
3646 lp->rx_ring[i].status = 0;
3647 lp->rx_skb[i] = (struct sk_buff *)1;
3648 }
3649}
3650
3651static void
3652de4x5_free_tx_buffs(struct net_device *dev)
3653{
3654 struct de4x5_private *lp = netdev_priv(dev);
3655 int i;
3656
3657 for (i=0; i<lp->txRingSize; i++) {
3658 if (lp->tx_skb[i])
3659 de4x5_free_tx_buff(lp, i);
3660 lp->tx_ring[i].status = 0;
3661 }
3662
3663
3664 __skb_queue_purge(&lp->cache.queue);
3665}
3666
3667
3668
3669
3670
3671
3672
3673
/*
** Park the transmit state while the media is changed: stop the device,
** reap finished TX buffers, preserve the CSR state across a soft reset
** and restart.  save_cnt guards against nested saves; balanced by
** de4x5_rst_desc_ring().
*/
static void
de4x5_save_skbs(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 omr;

    if (!lp->cache.save_cnt) {
        STOP_DE4X5;
        de4x5_tx(dev);                         /* reap completed buffers */
        de4x5_free_tx_buffs(dev);
        de4x5_cache_state(dev, DE4X5_SAVE_STATE);
        de4x5_sw_reset(dev);
        de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
        lp->cache.save_cnt++;
        START_DE4X5;
    }
}
3692
/*
** Restore the descriptor rings after de4x5_save_skbs(): reprogram the
** RX/TX ring base addresses, hand every RX descriptor back to the chip
** and clear the TX ring, then restart the device.
*/
static void
de4x5_rst_desc_ring(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i;
    s32 omr;

    if (lp->cache.save_cnt) {
        STOP_DE4X5;
        outl(lp->dma_rings, DE4X5_RRBA);
        outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
             DE4X5_TRBA);

        lp->rx_new = lp->rx_old = 0;
        lp->tx_new = lp->tx_old = 0;

        for (i = 0; i < lp->rxRingSize; i++) {
            lp->rx_ring[i].status = cpu_to_le32(R_OWN);  /* chip owns RX */
        }

        for (i = 0; i < lp->txRingSize; i++) {
            lp->tx_ring[i].status = cpu_to_le32(0);      /* host owns TX */
        }

        barrier();            /* descriptors visible before restarting */
        lp->cache.save_cnt--;
        START_DE4X5;
    }
}
3723
/*
** Save or restore the key CSR state (bus mode, operating mode with the
** start bits masked off, interrupt mask) around a soft reset; restoring
** also reinstates the GEP or SIA configuration as appropriate.
*/
static void
de4x5_cache_state(struct net_device *dev, int flag)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    switch(flag) {
    case DE4X5_SAVE_STATE:
        lp->cache.csr0 = inl(DE4X5_BMR);
        /* Strip the TX/RX start bits - they are re-set on restart */
        lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
        lp->cache.csr7 = inl(DE4X5_IMR);
        break;

    case DE4X5_RESTORE_STATE:
        outl(lp->cache.csr0, DE4X5_BMR);
        outl(lp->cache.csr6, DE4X5_OMR);
        outl(lp->cache.csr7, DE4X5_IMR);
        if (lp->chipset == DC21140) {
            gep_wr(lp->cache.gepc, dev);
            gep_wr(lp->cache.gep, dev);
        } else {
            reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
                           lp->cache.csr15);
        }
        break;
    }
}
3751
3752static void
3753de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3754{
3755 struct de4x5_private *lp = netdev_priv(dev);
3756
3757 __skb_queue_tail(&lp->cache.queue, skb);
3758}
3759
3760static void
3761de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3762{
3763 struct de4x5_private *lp = netdev_priv(dev);
3764
3765 __skb_queue_head(&lp->cache.queue, skb);
3766}
3767
3768static struct sk_buff *
3769de4x5_get_cache(struct net_device *dev)
3770{
3771 struct de4x5_private *lp = netdev_priv(dev);
3772
3773 return __skb_dequeue(&lp->cache.queue);
3774}
3775
3776
3777
3778
3779
/*
** Wait up to 'msec' ms for SIA autonegotiation to complete (ANS_NWOK)
** or for one of the interrupt bits in 'irqs' to assert.  Returns the
** status register contents or 100|TIMER_CB to re-poll.
*/
static int
test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 sts, ans;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;
        outl(irq_mask, DE4X5_IMR);

        /* Clear any pending interrupts (write-1-to-clear) */
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);
    }

    ans = inl(DE4X5_SISR) & SISR_ANS;
    sts = inl(DE4X5_STS) & ~TIMER_CB;

    /* Keep polling while no IRQ fired and negotiation isn't complete */
    if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
        sts = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return sts;
}
3807
/*
** Enable the device interrupts, but only once the receiver has been
** started (OMR_SR set); any pending status is acknowledged first so a
** stale interrupt doesn't fire immediately.
*/
static void
de4x5_setup_intr(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 imr, sts;

    if (inl(DE4X5_OMR) & OMR_SR) {     /* only if the RX process is running */
        imr = 0;
        UNMASK_IRQs;
        sts = inl(DE4X5_STS);          /* ack pending status (write-1-clear) */
        outl(sts, DE4X5_STS);
        ENABLE_IRQs;
    }
}
3823
3824
3825
3826
/*
** Reset and (re)initialise the SIA with the given CSR13/14/15 values.
** SROM-driven boards substitute their cached CSR values and GEP
** settings; infoblock type 3 boards run their scripted sequences
** instead and return early.
*/
static void
reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    RESET_SIA;
    if (lp->useSROM) {
        if (lp->ibn == 3) {
            srom_exec(dev, lp->phy[lp->active].rst);
            srom_exec(dev, lp->phy[lp->active].gep);
            outl(1, DE4X5_SICR);
            return;
        } else {
            /* Use the cached CSR values, write GEP control then data */
            csr15 = lp->cache.csr15;
            csr14 = lp->cache.csr14;
            csr13 = lp->cache.csr13;
            outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
            outl(csr15 | lp->cache.gep, DE4X5_SIGR);
        }
    } else {
        outl(csr15, DE4X5_SIGR);
    }
    outl(csr14, DE4X5_STRR);
    outl(csr13, DE4X5_SICR);

    mdelay(10);                        /* let the SIA settle */
}
3855
3856
3857
3858
3859static void
3860create_packet(struct net_device *dev, char *frame, int len)
3861{
3862 int i;
3863 char *buf = frame;
3864
3865 for (i=0; i<ETH_ALEN; i++) {
3866 *buf++ = dev->dev_addr[i];
3867 }
3868 for (i=0; i<ETH_ALEN; i++) {
3869 *buf++ = dev->dev_addr[i];
3870 }
3871
3872 *buf++ = 0;
3873 *buf++ = 1;
3874}
3875
3876
3877
3878
3879static int
3880EISA_signature(char *name, struct device *device)
3881{
3882 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3883 struct eisa_device *edev;
3884
3885 *name = '\0';
3886 edev = to_eisa_device (device);
3887 i = edev->id.driver_data;
3888
3889 if (i >= 0 && i < siglen) {
3890 strcpy (name, de4x5_signatures[i]);
3891 status = 1;
3892 }
3893
3894 return status;
3895}
3896
3897
3898
3899
/*
** Derive the adapter name from the SROM product-name field and decide
** whether the SROM infoblocks should be used for media selection.
** 'name' must have room for at least 9 bytes.  Always returns 0.
*/
static int
PCI_signature(char *name, struct de4x5_private *lp)
{
    int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);

    if (lp->chipset == DC21040) {
        strcpy(name, "DE434/5");
        return status;
    } else {
        /* Name string lives after the per-device leaf table in the SROM */
        int tmp = *((char *)&lp->srom + 19) * 3;
        strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
    }
    name[8] = '\0';                     /* strncpy may not NUL-terminate */
    for (i=0; i<siglen; i++) {
        if (strstr(name,de4x5_signatures[i])!=NULL) break;
    }
    if (i == siglen) {
        if (dec_only) {
            *name = '\0';               /* unknown board: reject it */
        } else {
            /* NOTE: the DC21040 arm below is unreachable - that chipset
            ** already returned above.  Kept as-is. */
            strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
                           ((lp->chipset == DC21041) ? "DC21041" :
                            ((lp->chipset == DC21140) ? "DC21140" :
                             ((lp->chipset == DC21142) ? "DC21142" :
                              ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
                               )))))));
        }
        if (lp->chipset != DC21041) {
            lp->useSROM = true;         /* unknown board: trust the SROM */
        }
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
        lp->useSROM = true;
    }

    return status;
}
3936
3937
3938
3939
3940
3941
3942
3943
3944
/*
** Determine whether an address PROM/SROM is present.  DC21040 parts
** only need their PROM pointer reset; other chips have the hardware
** address words read first, and if the address is all zeros or all ones
** (no SROM fitted) we bail out, otherwise the whole SROM is read into
** lp->srom.
*/
static void
DevicePresent(struct net_device *dev, u_long aprom_addr)
{
    int i, j=0;
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->chipset == DC21040) {
        if (lp->bus == EISA) {
            enet_addr_rst(aprom_addr); /* sync the EISA PROM pointer */
        } else {
            outl(0, aprom_addr);       /* reset the PCI PROM pointer */
        }
    } else {
        u_short tmp;
        __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
        for (i=0; i<(ETH_ALEN>>1); i++) {
            tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
            j += tmp;                  /* accumulate to detect 0 / all-1s */
            *p = cpu_to_le16(tmp);
        }
        if (j == 0 || j == 3 * 0xffff) {
            /* Address is all zeros or all ones - no SROM fitted */
            return;
        }

        p = (__le16 *)&lp->srom;
        for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
            tmp = srom_rd(aprom_addr, i);
            *p++ = cpu_to_le16(tmp);
        }
        de4x5_dbg_srom(&lp->srom);
    }
}
3978
3979
3980
3981
3982
3983
/*
** Scan the EISA address PROM output stream for the Ethernet PROM
** signature (two consecutive copies of ETH_PROM_SIG), matching up to
** PROBE_LENGTH extra bytes.  NOTE(review): this appears to leave the
** PROM read pointer positioned just past the signature, i.e. at the
** station address - confirm against the DC21040 EISA probe caller.
*/
static void
enet_addr_rst(u_long aprom_addr)
{
    union {
        struct {
            u32 a;                     /* PROM signature, first word  */
            u32 b;                     /* PROM signature, second word */
        } llsig;
        char Sig[sizeof(u32) << 1];
    } dev;
    short sigLength=0;
    s8 data;
    int i, j;

    dev.llsig.a = ETH_PROM_SIG;
    dev.llsig.b = ETH_PROM_SIG;
    sigLength = sizeof(u32) << 1;

    for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
        data = inb(aprom_addr);
        if (dev.Sig[j] == data) {      /* next signature byte matched */
            j++;
        } else {
            if (data == dev.Sig[0]) {  /* mismatch: maybe a new start */
                j=1;
            } else {
                j=0;                   /* restart the match */
            }
        }
    }
}
4015
4016
4017
4018
4019
4020
4021
/*
** Extract the Ethernet station address from the address PROM (DC21040 /
** EISA) or the previously read SROM, verifying the rotating checksum
** (only enforced when 'dec_only' is set).  Broken third-party SROMs
** (SMC/ACCTON layouts) are accommodated here and repaired afterwards.
** Returns 0 on success, -1 on a checksum failure.
*/
static int
get_hw_addr(struct net_device *dev)
{
    u_long iobase = dev->base_addr;
    int broken, i, k, tmp, status = 0;
    u_short j,chksum;
    struct de4x5_private *lp = netdev_priv(dev);

    broken = de4x5_bad_srom(lp);

    /* Read the address two bytes per pass, folding each word into the
    ** rotating checksum 'k'. */
    for (i=0,k=0,j=0;j<3;j++) {
        k <<= 1;
        if (k > 0xffff) k-=0xffff;     /* 16-bit one's-complement rotate */

        if (lp->bus == PCI) {
            if (lp->chipset == DC21040) {
                /* The PROM asserts bit 31 while data is pending */
                while ((tmp = inl(DE4X5_APROM)) < 0);
                k += (u_char) tmp;
                dev->dev_addr[i++] = (u_char) tmp;
                while ((tmp = inl(DE4X5_APROM)) < 0);
                k += (u_short) (tmp << 8);
                dev->dev_addr[i++] = (u_char) tmp;
            } else if (!broken) {
                dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
                dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
            } else if ((broken == SMC) || (broken == ACCTON)) {
                /* Broken SROMs keep the address at the very start */
                dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
                dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
            }
        } else {
            k += (u_char) (tmp = inb(EISA_APROM));
            dev->dev_addr[i++] = (u_char) tmp;
            k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
            dev->dev_addr[i++] = (u_char) tmp;
        }

        if (k > 0xffff) k-=0xffff;
    }
    if (k == 0xffff) k=0;              /* normalise the checksum */

    if (lp->bus == PCI) {
        if (lp->chipset == DC21040) {
            while ((tmp = inl(DE4X5_APROM)) < 0);
            chksum = (u_char) tmp;
            while ((tmp = inl(DE4X5_APROM)) < 0);
            chksum |= (u_short) (tmp << 8);
            if ((k != chksum) && (dec_only)) status = -1;
        }
    } else {
        chksum = (u_char) inb(EISA_APROM);
        chksum |= (u_short) (inb(EISA_APROM) << 8);
        if ((k != chksum) && (dec_only)) status = -1;
    }

    /* Patch up known-broken third party SROM layouts */
    srom_repair(dev, broken);

#ifdef CONFIG_PPC_PMAC
    /*
    ** If the address looks like a bit-reversed DEC OUI (00:a0:...),
    ** bit-reverse each byte - presumably to undo Old World PowerMac
    ** firmware byte ordering.  TODO(review): confirm the intent.
    */
    if ( machine_is(powermac) &&
         (dev->dev_addr[0] == 0) &&
         (dev->dev_addr[1] == 0xa0) )
    {
            for (i = 0; i < ETH_ALEN; ++i)
            {
                int x = dev->dev_addr[i];
                x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
                x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
                dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
            }
    }
#endif /* CONFIG_PPC_PMAC */

    /* Last chance to fix up a blank or duplicated address */
    status = test_bad_enet(dev, status);

    return status;
}
4103
4104
4105
4106
4107
4108static int
4109de4x5_bad_srom(struct de4x5_private *lp)
4110{
4111 int i, status = 0;
4112
4113 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4114 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
4115 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
4116 if (i == 0) {
4117 status = SMC;
4118 } else if (i == 1) {
4119 status = ACCTON;
4120 }
4121 break;
4122 }
4123 }
4124
4125 return status;
4126}
4127
/*
** Compare up to 'n' bytes of 'a' and 'b' (no NUL-termination check).
** Returns 0 when equal, otherwise the difference of the first
** mismatching byte pair (*a - *b).
*/
static int
de4x5_strncmp(char *a, char *b, int n)
{
    int diff = 0;

    while (n-- > 0) {
        diff = *a++ - *b++;
        if (diff)
            break;                     /* stop at the first mismatch */
    }

    return diff;
}
4139
/*
** Rebuild a usable SROM image for a known-broken card.  Only the SMC
** layout is repaired: the SROM is cleared, the already-extracted MAC
** address is written back and a canned info leaf is copied in so that
** SROM-based media selection can proceed.
*/
static void
srom_repair(struct net_device *dev, int card)
{
    struct de4x5_private *lp = netdev_priv(dev);

    switch(card) {
    case SMC:
        memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
        memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
        /* Canned replacement info leaf for the SMC layout */
        memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
        lp->useSROM = true;
        break;
    }
}
4154
4155
4156
4157
4158
/*
** Catch cards with a blank or known-bad station address (byte sum 0 or
** 0x5fa): synthesise a new address by incrementing the one from the
** previously probed card of the same chipset/bus, otherwise remember
** this card's details for any later siblings.  Returns the (possibly
** cleared) status.
*/
static int
test_bad_enet(struct net_device *dev, int status)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i, tmp;

    for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
    if ((tmp == 0) || (tmp == 0x5fa)) {        /* blank or known-bad sum */
        if ((lp->chipset == last.chipset) &&
            (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
            /* Derive an address from the previous card's, +1 */
            for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
            for (i=ETH_ALEN-1; i>2; --i) {
                dev->dev_addr[i] += 1;
                if (dev->dev_addr[i] != 0) break;  /* no carry needed */
            }
            for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
            if (!an_exception(lp)) {
                dev->irq = last.irq;
            }

            status = 0;                        /* address now usable */
        }
    } else if (!status) {
        /* Good address: remember this card for later siblings */
        last.chipset = lp->chipset;
        last.bus = lp->bus_num;
        last.irq = dev->irq;
        for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
    }

    return status;
}
4190
4191
4192
4193
4194static int
4195an_exception(struct de4x5_private *lp)
4196{
4197 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4198 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4199 return -1;
4200 }
4201
4202 return 0;
4203}
4204
4205
4206
4207
/*
** Read one 16-bit word at word address 'offset' from the MicroWire
** serial ROM at I/O address 'addr'.
*/
static short
srom_rd(u_long addr, u_char offset)
{
    sendto_srom(SROM_RD | SROM_SR, addr);      /* select the SROM */

    srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
    srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
    srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);

    return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
4219
/*
** Latch one bit into the SROM: present the data, pulse the clock high,
** then return it low.
*/
static void
srom_latch(u_int command, u_long addr)
{
    sendto_srom(command, addr);
    sendto_srom(command | DT_CLK, addr);
    sendto_srom(command, addr);
}
4227
/*
** Clock the start/opcode bits into the SROM, finishing with chip select
** held and the data line released.
*/
static void
srom_command(u_int command, u_long addr)
{
    srom_latch(command, addr);
    srom_latch(command, addr);
    srom_latch((command & 0x0000ff00) | DT_CS, addr);
}
4235
/*
** Clock the 6-bit word address out to the SROM, MSB first, then perform
** a dummy read of the data-out pin.
*/
static void
srom_address(u_int command, u_long addr, u_char offset)
{
    int i, a;

    a = offset << 2;                   /* align the 6 address bits at bit 7 */
    for (i=0; i<6; i++, a <<= 1) {
        srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
    }
    udelay(1);

    /* Dummy read of the DO pin; the result is intentionally discarded */
    i = (getfrom_srom(addr) >> 3) & 0x01;
}
4249
/*
** Clock 16 data bits out of the SROM, MSB first, then deselect the
** device.  Returns the assembled word.
*/
static short
srom_data(u_int command, u_long addr)
{
    int i;
    short word = 0;
    s32 tmp;

    for (i=0; i<16; i++) {
        sendto_srom(command | DT_CLK, addr);   /* clock high */
        tmp = getfrom_srom(addr);              /* sample DO  */
        sendto_srom(command, addr);            /* clock low  */

        word = (word << 1) | ((tmp >> 3) & 0x01);
    }

    sendto_srom(command & 0x0000ff00, addr);   /* deselect the SROM */

    return word;
}
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
/*
** Write a command word to the SROM interface register, then pause to
** respect the device's timing.
*/
static void
sendto_srom(u_int command, u_long addr)
{
    outl(command, addr);
    udelay(1);
}
4290
4291static int
4292getfrom_srom(u_long addr)
4293{
4294 s32 tmp;
4295
4296 tmp = inl(addr);
4297 udelay(1);
4298
4299 return tmp;
4300}
4301
/*
** Locate this chip's info leaf in the SROM: pick the chipset's decoder
** function from infoleaf_array[], then (when the SROM describes several
** controllers) find the entry matching our PCI device number and record
** its leaf offset.  Returns 0 on success or -ENXIO, clearing useSROM so
** the driver falls back to non-SROM media selection.
*/
static int
srom_infoleaf_info(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i, count;
    u_char *p;

    /* Find the infoleaf decoder function for this chipset */
    for (i=0; i<INFOLEAF_SIZE; i++) {
        if (lp->chipset == infoleaf_array[i].chipset) break;
    }
    if (i == INFOLEAF_SIZE) {
        lp->useSROM = false;
        printk("%s: Cannot find correct chipset for SROM decoding!\n",
               dev->name);
        return -ENXIO;
    }

    lp->infoleaf_fn = infoleaf_array[i].fn;

    /* The controller entries start at byte 26; byte 19 holds the count */
    count = *((u_char *)&lp->srom + 19);
    p = (u_char *)&lp->srom + 26;

    if (count > 1) {
        /* Multi-controller SROM: match our device number (3-byte entries) */
        for (i=count; i; --i, p+=3) {
            if (lp->device == *p) break;
        }
        if (i == 0) {
            lp->useSROM = false;
            printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
                   dev->name, lp->device);
            return -ENXIO;
        }
    }

    lp->infoleaf_offset = get_unaligned_le16(p + 1);

    return 0;
}
4342
4343
4344
4345
4346
4347
4348
4349
/*
** Walk all SROM info blocks once at initialisation so that blocks with
** one-time side effects (GPR setup, reset sequences - types 1, 3 and 5)
** are executed in "init" mode; the remaining block types are skipped.
*/
static void
srom_init(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    u_char count;

    p+=2;                              /* skip the media capability word */
    if (lp->chipset == DC21140) {
        lp->cache.gepc = (*p++ | GEP_CTRL);    /* general purpose control */
        gep_wr(lp->cache.gepc, dev);
    }

    /* Block count for this info leaf */
    count = *p++;

    /* Dispatch each block; byte 0 < 128 means a compact-format block */
    for (;count; --count) {
        if (*p < 128) {
            p += COMPACT_LEN;
        } else if (*(p+1) == 5) {
            type5_infoblock(dev, 1, p);        /* init-mode pass */
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 4) {
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 3) {
            type3_infoblock(dev, 1, p);        /* init-mode pass */
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 2) {
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 1) {
            type1_infoblock(dev, 1, p);        /* init-mode pass */
            p += ((*p & BLOCK_LEN) + 1);
        } else {
            p += ((*p & BLOCK_LEN) + 1);
        }
    }
}
4388
4389
4390
4391
4392
/*
** Execute an SROM GEP/reset sequence.  'p' points at a count byte
** followed by values that are either GEP bytes (DC21140, infoblocks
** other than type 5) or little-endian CSR15 words.  Only infoblock
** types 1, 3 and 5 carry such sequences.
*/
static void
srom_exec(struct net_device *dev, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_char count = (p ? *p++ : 0);
    u_short *w = (u_short *)p;

    if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;

    if (lp->chipset != DC21140) RESET_SIA;

    while (count--) {
        gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
                *p++ : get_unaligned_le16(w++)), dev);
        mdelay(2);                     /* 2ms per sequence step */
    }

    if (lp->chipset != DC21140) {
        /* Restore the SIA connectivity after the RESET_SIA above */
        outl(lp->cache.csr14, DE4X5_STRR);
        outl(lp->cache.csr13, DE4X5_SICR);
    }
}
4416
4417
4418
4419
4420
4421
/*
** DC21041 adapters carry no media info blocks to decode; simply return
** the default autosense timer interval (ms).
*/
static int
dc21041_infoleaf(struct net_device *dev)
{
    return DE4X5_AUTOSENSE_MS;
}
4427
/*
** Decode the DC21140 SROM info leaf: cache the GEP control byte, then
** dispatch the first info block through the dc_infoblock[] decoder table.
** When every block has been tried without success (tcount reached the
** block count), declare no-connection and reset the autosense state.
** Returns the next autosense tick interval with the timer flag cleared.
*/
static int
dc21140_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the leaf's media/connection word */
    p+=2;

    /* GEP control byte precedes the block count on DC21140 leaves */
    lp->cache.gepc = (*p++ | GEP_CTRL);

    /* Number of info blocks in this leaf */
    count = *p++;

    /* Dispatch on block format: <128 means compact, else byte 1 is the type */
    if (*p < 128) {
        next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
        next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }

    if (lp->tcount == count) {               /* All blocks tried: no usable medium */
        lp->media = NC;
        if (lp->media != lp->c_media) {
            de4x5_dbg_media(dev);            /* Report the media change */
            lp->c_media = lp->media;
        }
        lp->media = INIT;                    /* Restart the autosense cycle */
        lp->tcount = 0;
        lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4465
/*
** Decode the DC21142 SROM info leaf.  Same flow as dc21140_infoleaf()
** except DC21142 leaves carry no GEP control byte before the block count.
** Returns the next autosense tick interval with the timer flag cleared.
*/
static int
dc21142_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the leaf's media/connection word */
    p+=2;

    /* Number of info blocks in this leaf */
    count = *p++;

    /* Dispatch on block format: <128 means compact, else byte 1 is the type */
    if (*p < 128) {
        next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
        next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }

    if (lp->tcount == count) {               /* All blocks tried: no usable medium */
        lp->media = NC;
        if (lp->media != lp->c_media) {
            de4x5_dbg_media(dev);            /* Report the media change */
            lp->c_media = lp->media;
        }
        lp->media = INIT;                    /* Restart the autosense cycle */
        lp->tcount = 0;
        lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4500
/*
** Decode the DC21143 SROM info leaf.  Identical flow to
** dc21142_infoleaf(): skip the media word, read the block count and
** dispatch through dc_infoblock[]; declare no-connection when every
** block has been tried.  Returns the next autosense tick interval
** with the timer flag cleared.
*/
static int
dc21143_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the leaf's media/connection word */
    p+=2;

    /* Number of info blocks in this leaf */
    count = *p++;

    /* Dispatch on block format: <128 means compact, else byte 1 is the type */
    if (*p < 128) {
        next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
        next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }
    if (lp->tcount == count) {               /* All blocks tried: no usable medium */
        lp->media = NC;
        if (lp->media != lp->c_media) {
            de4x5_dbg_media(dev);            /* Report the media change */
            lp->c_media = lp->media;
        }
        lp->media = INIT;                    /* Restart the autosense cycle */
        lp->tcount = 0;
        lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4534
4535
4536
4537
4538
/*
** Decode a compact format (fixed length) SROM info block.  Skips forward
** over blocks already tried this autosense cycle (lp->tcount), then
** latches this block's media code, GEP value and CSR6/flags settings and
** switches the MAC port before continuing DC21140-style autoconfiguration.
*/
static int
compact_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6;

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+COMPACT_LEN) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
        } else {
            return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
        }
    }

    /* Set up the media descriptor only at the start of a media trial */
    if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = COMPACT;
        lp->active = 0;
        gep_wr(lp->cache.gepc, dev);
        lp->infoblock_media = (*p++) & COMPACT_MC;   /* Media code */
        lp->cache.gep = *p++;                        /* GEP data for this medium */
        csr6 = *p++;
        flags = *p++;

        lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity sense bit valid? */
        lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
        lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* Sense bit position */
        lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
        lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
        lp->useMII = false;                          /* Compact blocks are SRL media */

        de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4575
4576
4577
4578
/*
** Decode a type 0 (extended, non-MII) SROM info block.  Layout mirrors
** the compact block but with a length byte; latches media code, GEP and
** CSR6/flags, then runs DC21140-style autoconfiguration.
*/
static int
type0_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6, len = (*p & BLOCK_LEN)+1;    /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    /* Set up the media descriptor only at the start of a media trial */
    if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = 0;
        lp->active = 0;
        gep_wr(lp->cache.gepc, dev);
        p+=2;                                        /* Skip length/type bytes */
        lp->infoblock_media = (*p++) & BLOCK0_MC;    /* Media code */
        lp->cache.gep = *p++;                        /* GEP data for this medium */
        csr6 = *p++;
        flags = *p++;

        lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity sense bit valid? */
        lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
        lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* Sense bit position */
        lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
        lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
        lp->useMII = false;

        de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4616
4617
4618
/*
** Decode a type 1 (DC21140 MII PHY) SROM info block.  When the driver is
** already INITIALISED the PHY parameters (GEP/reset sequences and the
** media capability/advertisement words) are captured; otherwise the block
** selects MII mode and starts Nway autonegotiation.
*/
static int
type1_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    p += 2;                                          /* Skip length/type bytes */
    if (lp->state == INITIALISED) {
        lp->ibn = 1;
        lp->active = *p++;                           /* PHY number */
        /* GEP and reset sequences: pointer kept only if length byte non-zero */
        lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
        lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
        lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;  /* Media capabilities */
        lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;  /* NWay advertisement */
        lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;  /* Full duplex map */
        lp->phy[lp->active].ttm = get_unaligned_le16(p);          /* Transmit threshold map */
        return 0;
    } else if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = 1;
        lp->active = *p;
        lp->infoblock_csr6 = OMR_MII_100;
        lp->useMII = true;
        lp->infoblock_media = ANS;                   /* Autonegotiate */

        de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4657
/*
** Decode a type 2 (DC2114x SIA) SROM info block.  Latches the media code
** and SIA CSR13/14/15 values (explicit extended values, or chip defaults)
** plus the GEP control/data words, then runs DC2114x autoconfiguration.
*/
static int
type2_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    /* Set up the media descriptor only at the start of a media trial */
    if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = 2;
        lp->active = 0;
        p += 2;                                      /* Skip length/type bytes */
        lp->infoblock_media = (*p) & MEDIA_CODE;

        if ((*p++) & EXT_FIELD) {                    /* Extended SIA values present? */
            lp->cache.csr13 = get_unaligned_le16(p); p += 2;
            lp->cache.csr14 = get_unaligned_le16(p); p += 2;
            lp->cache.csr15 = get_unaligned_le16(p); p += 2;
        } else {                                     /* Use the chip defaults */
            lp->cache.csr13 = CSR13;
            lp->cache.csr14 = CSR14;
            lp->cache.csr15 = CSR15;
        }
        /* GEP control/data live in the upper halves on SIA chips */
        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16);
        lp->infoblock_csr6 = OMR_SIA;
        lp->useMII = false;

        de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4698
/*
** Decode a type 3 (DC2114x MII PHY) SROM info block.  Like type 1 but the
** GEP/reset sequences are word (not byte) counted and a media-capability
** interrupt byte follows.  MOTO_SROM_BUG works around boards whose SROM
** reports a bogus active PHY number.
*/
static int
type3_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    p += 2;                                          /* Skip length/type bytes */
    if (lp->state == INITIALISED) {
        lp->ibn = 3;
        lp->active = *p++;                           /* PHY number */
        if (MOTO_SROM_BUG) lp->active = 0;           /* Work around broken SROMs */
        /* Sequences are counted in 16-bit words here, hence 2 * (*p) */
        lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
        lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
        lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;  /* Media capabilities */
        lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;  /* NWay advertisement */
        lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;  /* Full duplex map */
        lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;  /* Transmit threshold map */
        lp->phy[lp->active].mci = *p;                /* MII interrupt info */
        return 0;
    } else if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = 3;
        lp->active = *p;
        if (MOTO_SROM_BUG) lp->active = 0;           /* Work around broken SROMs */
        lp->infoblock_csr6 = OMR_MII_100;
        lp->useMII = true;
        lp->infoblock_media = ANS;                   /* Autonegotiate */

        de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4740
/*
** Decode a type 4 (DC2114x SYM media) SROM info block.  Uses default SIA
** CSR values, reads GEP control/data words plus CSR6/flags, and then runs
** DC2114x autoconfiguration.
*/
static int
type4_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6, len = (*p & BLOCK_LEN)+1;    /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    /* Set up the media descriptor only at the start of a media trial */
    if ((lp->media == INIT) && (lp->timeout < 0)) {
        lp->ibn = 4;
        lp->active = 0;
        p+=2;                                        /* Skip length/type bytes */
        lp->infoblock_media = (*p++) & MEDIA_CODE;
        lp->cache.csr13 = CSR13;                     /* Chip default SIA values */
        lp->cache.csr14 = CSR14;
        lp->cache.csr15 = CSR15;
        /* GEP control/data live in the upper halves on SIA chips */
        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
        csr6 = *p++;
        flags = *p++;

        lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity sense bit valid? */
        lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
        lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* Sense bit position */
        lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
        lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
        lp->useMII = false;

        de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4781
4782
4783
4784
4785
/*
** Decode a type 5 (reset sequence) SROM info block.  This block carries
** a GEP/SIA reset sequence rather than a medium; record it and execute
** it via srom_exec() during initialisation.
*/
static int
type5_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* Block length from byte 0 */

    /* Recurse to the next untried block, dispatching by its format */
    if (--count > lp->tcount) {
        if (*(p+len) < 128) {
            return dc_infoblock[COMPACT](dev, count, p+len);
        } else {
            return dc_infoblock[*(p+len+1)](dev, count, p+len);
        }
    }

    /* Must be initializing to run this block */
    if ((lp->state == INITIALISED) || (lp->media == INIT)) {
        p+=2;                                        /* Skip length/type bytes */
        lp->rst = p;                                 /* Remember the reset sequence */
        srom_exec(dev, lp->rst);                     /* ...and run it now */
    }

    return DE4X5_AUTOSENSE_MS;
}
4810
4811
4812
4813
4814
/*
** Read a 16-bit MII management register 'phyreg' from PHY 'phyaddr' by
** bit-banging the serial MII frame: 34-bit preamble, read start/opcode,
** addresses, turnaround, then clock in the data.
*/
static int
mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
{
    mii_wdata(MII_PREAMBLE,  2, ioaddr);   /* Start of 34 bit preamble...    */
    mii_wdata(MII_PREAMBLE, 32, ioaddr);   /* ...continued                   */
    mii_wdata(MII_STRD, 4, ioaddr);        /* Start of frame + read opcode   */
    mii_address(phyaddr, ioaddr);          /* PHY address (5 bits, MSB first)*/
    mii_address(phyreg, ioaddr);           /* Register address               */
    mii_ta(MII_STRD, ioaddr);              /* Turnaround: release the bus    */

    return mii_rdata(ioaddr);              /* Clock in the 16 data bits      */
}
4827
/*
** Write 16-bit value 'data' to MII management register 'phyreg' of PHY
** 'phyaddr' by bit-banging the serial MII write frame.
*/
static void
mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
{
    mii_wdata(MII_PREAMBLE,  2, ioaddr);   /* Start of 34 bit preamble...    */
    mii_wdata(MII_PREAMBLE, 32, ioaddr);   /* ...continued                   */
    mii_wdata(MII_STWR, 4, ioaddr);        /* Start of frame + write opcode  */
    mii_address(phyaddr, ioaddr);          /* PHY address (5 bits, MSB first)*/
    mii_address(phyreg, ioaddr);           /* Register address               */
    mii_ta(MII_STWR, ioaddr);              /* Turnaround: drive 1,0          */
    data = mii_swap(data, 16);             /* Data is shifted out LSB first  */
    mii_wdata(data, 16, ioaddr);           /* ...so reverse to send MSB first*/
}
4840
/*
** Clock in the 16 data bits of an MII read frame, MSB first, and return
** them as an integer.
*/
static int
mii_rdata(u_long ioaddr)
{
    int i;
    s32 tmp = 0;

    for (i=0; i<16; i++) {
        tmp <<= 1;                                     /* MSB arrives first */
        tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);  /* One clocked bit */
    }

    return tmp;
}
4854
/*
** Clock out the low 'len' bits of 'data' on the MII management bus,
** LSB first.
*/
static void
mii_wdata(int data, int len, u_long ioaddr)
{
    int i;

    for (i=0; i<len; i++) {
        sendto_mii(MII_MWR | MII_WR, data, ioaddr);  /* Send current LSB */
        data >>= 1;
    }
}
4865
/*
** Clock out a 5-bit MII address field.  The address is bit-reversed
** first because the wire expects MSB first while sendto_mii() sends the
** current LSB.
*/
static void
mii_address(u_char addr, u_long ioaddr)
{
    int i;

    addr = mii_swap(addr, 5);                        /* Reverse to send MSB first */
    for (i=0; i<5; i++) {
        sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
        addr >>= 1;
    }
}
4877
/*
** Drive the MII turnaround cycle: a write frame drives the bits 1,0;
** a read frame just clocks once while the PHY takes over the bus.
*/
static void
mii_ta(u_long rw, u_long ioaddr)
{
    if (rw == MII_STWR) {
        sendto_mii(MII_MWR | MII_WR, 1, ioaddr);     /* Write turnaround = 1,0 */
        sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
    } else {
        getfrom_mii(MII_MRD | MII_RD, ioaddr);       /* Read: float the bus */
    }
}
4888
/*
** Reverse the order of the low 'len' bits of 'data' and return the
** result.  Used because the MII frame fields are transmitted MSB first
** while the bit-bang helpers shift out the LSB.
*/
static int
mii_swap(int data, int len)
{
    int reversed = 0;

    while (len-- > 0) {
        reversed = (reversed << 1) | (data & 1);  /* Move current LSB to the front */
        data >>= 1;
    }

    return reversed;
}
4902
/*
** Put one bit (the LSB of 'data') on the MII management data line and
** toggle the management clock to latch it into the PHY.
*/
static void
sendto_mii(u32 command, int data, u_long ioaddr)
{
    u32 j;

    j = (data & 1) << 17;                /* Position the bit on the MDIO line */
    outl(command | j, ioaddr);           /* Set data with clock low */
    udelay(1);
    outl(command | MII_MDC | j, ioaddr); /* Raise the clock to latch the bit */
    udelay(1);
}
4914
/*
** Clock the MII management bus once and sample the data line; returns
** the received bit (0 or 1).
*/
static int
getfrom_mii(u32 command, u_long ioaddr)
{
    outl(command, ioaddr);               /* Clock low */
    udelay(1);
    outl(command | MII_MDC, ioaddr);     /* Clock high */
    udelay(1);

    return (inl(ioaddr) >> 19) & 1;      /* Sample the MDIO input bit */
}
4925
4926
4927
4928
/*
** Identify the PHY at 'phyaddr' by reading its ID registers.  Both ID
** registers are read, but only the raw MII_ID0 value is returned and
** used as the device identifier by mii_get_phy(); r3 is intentionally
** unused here.
*/
static int
mii_get_oui(u_char phyaddr, u_long ioaddr)
{
    int r2, r3;

    /* Read r2 and r3 */
    r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
    r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
    /* NOTE(review): r3 (ID1) is read but discarded — the raw ID0 word,
    ** not a decoded IEEE OUI, is what the caller matches against.
    */

    return r2;
}
4972
4973
4974
4975
/*
** Scan the MII address space for PHY devices, matching each responding
** device against the phy_info[] table (falling back to a generic entry
** for unknown IDs), then reset every PHY found.  Returns the number of
** PHYs discovered; clears lp->useMII when none respond.
*/
static int
mii_get_phy(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, j, k, n, limit=ARRAY_SIZE(phy_info);
    int id;

    lp->active = 0;
    lp->useMII = true;

    /* Search the MII address space, starting at address 1; 'n' counts the
    ** pass through address 0 so the loop ends after one full cycle. */
    for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
        lp->phy[lp->active].addr = i;
        if (i==0) n++;                               /* Wrapped past address 0 */
        while (de4x5_reset_phy(dev)<0) udelay(100);  /* Wait for the reset to take */
        id = mii_get_oui(i, DE4X5_MII);
        if ((id == 0) || (id == 65535)) continue;    /* No device at this address */
        for (j=0; j<limit; j++) {                    /* Search the known-PHY table */
            if (id != phy_info[j].id) continue;
            /* Find the first free lp->phy[] slot */
            for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
            if (k < DE4X5_MAX_PHY) {
                memcpy((char *)&lp->phy[k],
                       (char *)&phy_info[j], sizeof(struct phy_table));
                lp->phy[k].addr = i;
                lp->mii_cnt++;
                lp->active++;
            } else {
                goto purgatory;                      /* Table full: stop the search */
            }
            break;
        }
        if ((j == limit) && (i < DE4X5_MAX_MII)) {   /* Unknown ID: use a generic entry */
            for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
            lp->phy[k].addr = i;
            lp->phy[k].id = id;
            lp->phy[k].spd.reg = GENERIC_REG;
            lp->phy[k].spd.mask = GENERIC_MASK;
            lp->phy[k].spd.value = GENERIC_VALUE;
            lp->mii_cnt++;
            lp->active++;
            printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
            j = de4x5_debug;                         /* Force a one-off MII dump */
            de4x5_debug |= DEBUG_MII;
            de4x5_dbg_mii(dev, k);
            de4x5_debug = j;                         /* Restore the debug level */
            printk("\n");
        }
    }
  purgatory:
    lp->active = 0;
    if (lp->phy[0].id) {                             /* Reset the PHY devices found */
        for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
            mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
            while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);

            de4x5_dbg_mii(dev, k);
        }
    }
    if (!lp->mii_cnt) lp->useMII = false;            /* No PHYs: fall back to SRL media */

    return lp->mii_cnt;
}
5039
/*
** Build the chip's setup frame for address filtering.  In HASH_PERF mode
** the host address is packed into the imperfect-filter area and a hash
** bit is set for broadcasts; otherwise a perfect filter is built with the
** host address followed by the broadcast address (addresses are packed
** two bytes per 32-bit setup-frame word, hence the pointer strides).
** Returns the next free position in the frame.
*/
static char *
build_setup_frame(struct net_device *dev, int mode)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;
    char *pa = lp->setup_frame;

    /* Initialise the setup frame when rebuilding it completely */
    if (mode == ALL) {
        memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
    }

    if (lp->setup_f == HASH_PERF) {
        for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
            *(pa + i) = dev->dev_addr[i];            /* Host address */
            if (i & 0x01) pa += 2;                   /* Skip the unused half-word */
        }
        /* Mark the broadcast hash bit — NOTE(review): offset derived from
        ** HASH_TABLE_LEN; verify against the chip's hash layout. */
        *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
    } else {
        for (i=0; i<ETH_ALEN; i++) {                 /* Host address */
            *(pa + (i&1)) = dev->dev_addr[i];
            if (i & 0x01) pa += 4;                   /* Next setup-frame word */
        }
        for (i=0; i<ETH_ALEN; i++) {                 /* Broadcast address */
            *(pa + (i&1)) = (char) 0xff;
            if (i & 0x01) pa += 4;
        }
    }

    return pa;
}
5071
/*
** Stop the autosense timer, waiting for any running timer callback to
** finish before returning.
*/
static void
disable_ast(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    del_timer_sync(&lp->timer);
}
5078
/*
** Switch the MAC to the port/mode selected by the current info block:
** stop the chip, merge the block's CSR6 bits into the operating mode
** register, reset, and restore GEP (DC21140) or SIA (DC2114x) state.
** Returns the OMR value written.
*/
static long
de4x5_switch_mac_port(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 omr;

    STOP_DE4X5;                          /* Quiesce the chip before reconfiguring */

    /* Assert the OMR_PS bit in CSR6 */
    omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
                              OMR_FDX));
    omr |= lp->infoblock_csr6;
    if (omr & OMR_PS) omr |= OMR_HBD;    /* Port select implies heartbeat disable */
    outl(omr, DE4X5_OMR);

    /* Soft reset so the new mode takes effect */
    RESET_DE4X5;

    /* Restore the GEP (DC21140) or the SIA registers (DC2114x) */
    if (lp->chipset == DC21140) {
        gep_wr(lp->cache.gepc, dev);
        gep_wr(lp->cache.gep, dev);
    } else if ((lp->chipset & ~0x0ff) == DC2114x) {
        reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
    }

    /* Restore CSR6 (the reset cleared it) */
    outl(omr, DE4X5_OMR);

    /* Reset the missed frame counter by reading it */
    inl(DE4X5_MFC);

    return omr;
}
5114
/*
** Write to the general purpose port: the DC21140 has a dedicated GEP
** register, while DC2114x chips map it into the upper half of the SIA
** general register (merged with the cached CSR15 value).
*/
static void
gep_wr(s32 data, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->chipset == DC21140) {
        outl(data, DE4X5_GEP);
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
        outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
    }
}
5127
/*
** Read the general purpose port: DC21140 from its GEP register, DC2114x
** from the low 20 bits of the SIA general register.  Returns 0 for
** other chipsets.
*/
static int
gep_rd(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->chipset == DC21140) {
        return inl(DE4X5_GEP);
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
        return inl(DE4X5_SIGR) & 0x000fffff;
    }

    return 0;
}
5142
/*
** Change the chip's power state (WAKEUP / SNOOZE / SLEEP).  DC21040 and
** DC21140 have no power management, so they are ignored.  EISA boards use
** direct I/O to the configuration power register; PCI boards go through
** config space.  Going to SLEEP also parks the SIA.
*/
static void
yawn(struct net_device *dev, int state)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;

    if(lp->bus == EISA) {
        switch(state) {
        case WAKEUP:
            outb(WAKEUP, PCI_CFPM);
            mdelay(10);                  /* Allow the chip to power up */
            break;

        case SNOOZE:
            outb(SNOOZE, PCI_CFPM);
            break;

        case SLEEP:
            outl(0, DE4X5_SICR);         /* Park the SIA before sleeping */
            outb(SLEEP, PCI_CFPM);
            break;
        }
    } else {
        struct pci_dev *pdev = to_pci_dev (lp->gendev);
        switch(state) {
        case WAKEUP:
            pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
            mdelay(10);                  /* Allow the chip to power up */
            break;

        case SNOOZE:
            pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
            break;

        case SLEEP:
            outl(0, DE4X5_SICR);         /* Park the SIA before sleeping */
            pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
            break;
        }
    }
}
5186
5187static void
5188de4x5_parse_params(struct net_device *dev)
5189{
5190 struct de4x5_private *lp = netdev_priv(dev);
5191 char *p, *q, t;
5192
5193 lp->params.fdx = false;
5194 lp->params.autosense = AUTO;
5195
5196 if (args == NULL) return;
5197
5198 if ((p = strstr(args, dev->name))) {
5199 if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
5200 t = *q;
5201 *q = '\0';
5202
5203 if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
5204
5205 if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
5206 if (strstr(p, "TP")) {
5207 lp->params.autosense = TP;
5208 } else if (strstr(p, "TP_NW")) {
5209 lp->params.autosense = TP_NW;
5210 } else if (strstr(p, "BNC")) {
5211 lp->params.autosense = BNC;
5212 } else if (strstr(p, "AUI")) {
5213 lp->params.autosense = AUI;
5214 } else if (strstr(p, "BNC_AUI")) {
5215 lp->params.autosense = BNC;
5216 } else if (strstr(p, "10Mb")) {
5217 lp->params.autosense = _10Mb;
5218 } else if (strstr(p, "100Mb")) {
5219 lp->params.autosense = _100Mb;
5220 } else if (strstr(p, "AUTO")) {
5221 lp->params.autosense = AUTO;
5222 }
5223 }
5224 *q = t;
5225 }
5226}
5227
/*
** Debug dump at open time (when DEBUG_OPEN is enabled): IRQ, MAC address,
** and the first/last descriptor and buffer addresses of both rings.
*/
static void
de4x5_dbg_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;

    if (de4x5_debug & DEBUG_OPEN) {
        printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
        printk("\tphysical address: %pM\n", dev->dev_addr);
        printk("Descriptor head addresses:\n");
        printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
        printk("Descriptor addresses:\nRX: ");
        for (i=0;i<lp->rxRingSize-1;i++){
            if (i < 3) {                 /* Only the first 3, then elide */
                printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);   /* Last entry */
        printk("TX: ");
        for (i=0;i<lp->txRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
        printk("Descriptor buffers:\nRX: ");
        for (i=0;i<lp->rxRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
        printk("TX: ");
        for (i=0;i<lp->txRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
        printk("Ring size:\nRX: %d\nTX: %d\n",
               (short)lp->rxRingSize,
               (short)lp->txRingSize);
    }
}
5272
/*
** Debug dump (when DEBUG_MII is enabled) of the standard MII registers
** of PHY slot 'k', plus some vendor registers that differ for the
** Broadcom T4 device.
*/
static void
de4x5_dbg_mii(struct net_device *dev, int k)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (de4x5_debug & DEBUG_MII) {
        printk("\nMII device address: %d\n", lp->phy[k].addr);
        printk("MII CR:  %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
        printk("MII SR:  %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
        printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
        printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
            printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
        }
        printk("MII 16:  %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII 17:  %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
            printk("MII 18:  %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
        } else {
            printk("MII 20:  %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
        }
    }
}
5298
/*
** Report a media change (when DEBUG_MEDIA is enabled) and remember the
** newly selected medium in lp->c_media so each change is logged once.
*/
static void
de4x5_dbg_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->media != lp->c_media) {
        if (de4x5_debug & DEBUG_MEDIA) {
            printk("%s: media is %s%s\n", dev->name,
                   (lp->media == NC  ? "unconnected, link down or incompatible connection" :
                    (lp->media == TP  ? "TP" :
                     (lp->media == ANS ? "TP/Nway" :
                      (lp->media == BNC ? "BNC" :
                       (lp->media == AUI ? "AUI" :
                        (lp->media == BNC_AUI ? "BNC/AUI" :
                         (lp->media == EXT_SIA ? "EXT SIA" :
                          (lp->media == _100Mb  ? "100Mb/s" :
                           (lp->media == _10Mb   ? "10Mb/s" :
                            "???"
                            ))))))))), (lp->fdx?" full duplex.":"."));
        }
        lp->c_media = lp->media;         /* Remember so we log each change once */
    }
}
5322
/*
** Debug dump (when DEBUG_SROM is enabled) of the SROM header fields and
** the full 128-byte SROM contents as 16-bit words.
** NOTE(review): the direct u_short casts assume the SROM fields are
** suitably aligned and in host byte order — debug output only.
*/
static void
de4x5_dbg_srom(struct de4x5_srom *p)
{
    int i;

    if (de4x5_debug & DEBUG_SROM) {
        printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
        printk("Sub-system ID:        %04x\n", *((u_short *)p->sub_system_id));
        printk("ID Block CRC:         %02x\n", (u_char)(p->id_block_crc));
        printk("SROM version:         %02x\n", (u_char)(p->version));
        printk("# controllers:        %02x\n", (u_char)(p->num_controllers));

        printk("Hardware Address:     %pM\n", p->ieee_addr);
        printk("CRC checksum:         %04x\n", (u_short)(p->chksum));
        for (i=0; i<64; i++) {
            printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
        }
    }
}
5342
/*
** Debug dump (when DEBUG_RX is enabled) of a received frame: destination
** and source MAC, length/SAP bytes, then a hex dump of the payload in
** 16-byte rows.
*/
static void
de4x5_dbg_rx(struct sk_buff *skb, int len)
{
    int i, j;

    if (de4x5_debug & DEBUG_RX) {
        printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
               skb->data, &skb->data[6],
               (u_char)skb->data[12],
               (u_char)skb->data[13],
               len);
        for (j=0; len>0;j+=16, len-=16) {
            printk("    %03x: ",j);
            for (i=0; i<16 && i<len; i++) {
                printk("%02x ",(u_char)skb->data[i+j]);
            }
            printk("\n");
        }
    }
}
5363
5364
5365
5366
5367
5368
/*
** Private ioctl interface.  The de4x5_ioctl sub-command lives in
** ioc->cmd; data is exchanged with userspace via ioc->data/ioc->len.
** Privileged sub-commands check CAP_NET_ADMIN.  Returns 0 on success or
** a negative errno.
*/
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
        u8  addr[144];
        u16 sval[72];
        u32 lval[36];
    } tmp;
    u_long flags = 0;

    switch(ioc->cmd) {
    case DE4X5_GET_HWADDR:               /* Get the hardware address */
        ioc->len = ETH_ALEN;
        for (i=0; i<ETH_ALEN; i++) {
            tmp.addr[i] = dev->dev_addr[i];
        }
        if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;

    case DE4X5_SET_HWADDR:               /* Set the hardware address */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
        if (netif_queue_stopped(dev))
            return -EBUSY;               /* TX busy: caller should retry */
        netif_stop_queue(dev);
        for (i=0; i<ETH_ALEN; i++) {
            dev->dev_addr[i] = tmp.addr[i];
        }
        build_setup_frame(dev, PHYS_ADDR_ONLY);
        /* Set up the descriptor and give ownership to the card */
        load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
                    SETUP_FRAME_LEN, (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);    /* Start the TX */
        netif_wake_queue(dev);
        break;

    case DE4X5_SAY_BOO:                  /* Say "Boo!" to the kernel log file */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        printk("%s: Boo!\n", dev->name);
        break;

    case DE4X5_MCA_EN:                   /* Enable pass-all multicast mode */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        omr = inl(DE4X5_OMR);
        omr |= OMR_PM;
        outl(omr, DE4X5_OMR);
        break;

    case DE4X5_GET_STATS:                /* Copy the driver's packet statistics */
    {
        struct pkt_stats statbuf;
        ioc->len = sizeof(statbuf);
        spin_lock_irqsave(&lp->lock, flags);   /* Snapshot under the lock */
        memcpy(&statbuf, &lp->pktStats, ioc->len);
        spin_unlock_irqrestore(&lp->lock, flags);
        if (copy_to_user(ioc->data, &statbuf, ioc->len))
            return -EFAULT;
        break;
    }
    case DE4X5_CLR_STATS:                /* Zero the driver's packet statistics */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        spin_lock_irqsave(&lp->lock, flags);
        memset(&lp->pktStats, 0, sizeof(lp->pktStats));
        spin_unlock_irqrestore(&lp->lock, flags);
        break;

    case DE4X5_GET_OMR:                  /* Get the OMR register contents */
        /* NOTE(review): only the low byte of the 32-bit OMR is returned
        ** (tmp.addr[0] is a u8) — verify this matches the tool's ABI. */
        tmp.addr[0] = inl(DE4X5_OMR);
        if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
        break;

    case DE4X5_SET_OMR:                  /* Set the OMR register contents */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
        outl(tmp.addr[0], DE4X5_OMR);
        break;

    case DE4X5_GET_REG:                  /* Dump the main CSRs to userspace */
        j = 0;
        tmp.lval[0] = inl(DE4X5_STS); j+=4;    /* Status */
        tmp.lval[1] = inl(DE4X5_BMR); j+=4;    /* Bus mode */
        tmp.lval[2] = inl(DE4X5_IMR); j+=4;    /* Interrupt mask */
        tmp.lval[3] = inl(DE4X5_OMR); j+=4;    /* Operating mode */
        tmp.lval[4] = inl(DE4X5_SISR); j+=4;   /* SIA status */
        tmp.lval[5] = inl(DE4X5_SICR); j+=4;   /* SIA connectivity */
        tmp.lval[6] = inl(DE4X5_STRR); j+=4;   /* SIA TX/RX */
        tmp.lval[7] = inl(DE4X5_SIGR); j+=4;   /* SIA general */
        ioc->len = j;
        if (copy_to_user(ioc->data, tmp.lval, ioc->len))
            return -EFAULT;
        break;

#define DE4X5_DUMP 0x0f                  /* Retained for tools that know this code */

    default:
        return -EOPNOTSUPP;
    }

    return status;
}
5563
5564static int __init de4x5_module_init (void)
5565{
5566 int err = 0;
5567
5568#ifdef CONFIG_PCI
5569 err = pci_register_driver(&de4x5_pci_driver);
5570#endif
5571#ifdef CONFIG_EISA
5572 err |= eisa_driver_register (&de4x5_eisa_driver);
5573#endif
5574
5575 return err;
5576}
5577
/*
** Module exit point: unregister whichever bus drivers were registered
** at init time.
*/
static void __exit de4x5_module_exit (void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver (&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    eisa_driver_unregister (&de4x5_eisa_driver);
#endif
}
5587
/* Hook the module entry/exit points into the kernel module machinery */
module_init (de4x5_module_init);
module_exit (de4x5_module_exit);
5590