/*  de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
    ethernet driver for Linux.
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/eisa.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#endif

#include "de4x5.h"

static const char version[] =
    KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";

#define c_char const char

struct phy_table {
    int reset;
    int id;
    int ta;
    struct {
        int reg;
        int mask;
        int value;
    } spd;
};

struct mii_phy {
    int reset;
    int id;
    int ta;
    struct {
        int reg;
        int mask;
        int value;
    } spd;
    int addr;
    u_char *gep;
    u_char *rst;
    u_int mc;
    u_int ana;
    u_int fdx;
    u_int ttm;
    u_int mci;
};

#define DE4X5_MAX_PHY 8

struct sia_phy {
    u_char mc;
    u_char ext;
    int csr13;
    int csr14;
    int csr15;
    int gepc;
    int gep;
};

static struct phy_table phy_info[] = {
    {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
    {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
    {0, SEEQ_T4,     1, {0x12, 0x10, 0x10}},
    {0, CYPRESS_T4,  1, {0x05, 0x20, 0x20}},
    {0, 0x7810,      1, {0x14, 0x0800, 0x0800}}
};

#define GENERIC_REG   0x05
#define GENERIC_MASK  MII_ANLPA_100M
#define GENERIC_VALUE MII_ANLPA_100M

static c_char enet_det[][ETH_ALEN] = {
    {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
    {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
};

#define SMC    1
#define ACCTON 2

static c_char srom_repair_info[][100] = {
    {0x00,0x1e,0x00,0x00,0x00,0x08,
     0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
     0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
     0x00,0x18,}
};

#ifdef DE4X5_DEBUG
static int de4x5_debug = DE4X5_DEBUG;
#else
static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
#endif

#ifdef DE4X5_PARM
static char *args = DE4X5_PARM;
#else
static char *args;
#endif

struct parameters {
    bool fdx;
    int autosense;
};
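
/*
** The 'args' string above is parsed by de4x5_parse_params() to set the
** per-device full duplex and autosense overrides held in this structure.
** As an illustrative (not authoritative) example, the module has
** historically been loaded with something like:
**
**     insmod de4x5 args='eth0:fdx autosense=AUI'
**
** where "fdx" requests full duplex and "autosense" picks a medium.
*/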

#define DE4X5_AUTOSENSE_MS 250

#define DE4X5_NDA 0xffe0

#define PROBE_LENGTH 32
#define ETH_PROM_SIG 0xAA5500FFUL

#define PKT_BUF_SZ 1536
#define IEEE802_3_SZ 1518
#define MAX_PKT_SZ 1514
#define MAX_DAT_SZ 1500
#define MIN_DAT_SZ 1
#define PKT_HDR_LEN 14
#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
#define QUEUE_PKT_TIMEOUT (3*HZ)

#define DE4X5_EISA_IO_PORTS 0x0c00
#define DE4X5_EISA_TOTAL_SIZE 0x100

#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}

#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
#define DE4X5_NAME_LENGTH 8

static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;

#define PROBE_LENGTH 32
#define ETH_PROM_SIG 0xAA5500FFUL

#define PCI_MAX_BUS_NUM 8
#define DE4X5_PCI_TOTAL_SIZE 0x80
#define DE4X5_CLASS_CODE 0x00020000

#define DE4X5_ALIGN4 ((u_long)4 - 1)
#define DE4X5_ALIGN8 ((u_long)8 - 1)
#define DE4X5_ALIGN16 ((u_long)16 - 1)
#define DE4X5_ALIGN32 ((u_long)32 - 1)
#define DE4X5_ALIGN64 ((u_long)64 - 1)
#define DE4X5_ALIGN128 ((u_long)128 - 1)

#define DE4X5_ALIGN DE4X5_ALIGN32
#define DE4X5_CACHE_ALIGN CAL_16LONG
#define DESC_SKIP_LEN DSL_0

#define DESC_ALIGN

#ifndef DEC_ONLY
static int dec_only;
#else
static int dec_only = 1;
#endif

#define ENABLE_IRQs { \
    imr |= lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define DISABLE_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define UNMASK_IRQs {\
    imr |= lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define MASK_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}
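
/*
** Note that the interrupt enable/mask macros above are not self-contained:
** they expect a local 's32 imr' and the device 'iobase' (used by the
** DE4X5_IMR register offset macro in de4x5.h) to be in scope in the
** calling function, as in de4x5_interrupt() and de4x5_close() below.
*/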

#define START_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr |= OMR_ST | OMR_SR;\
    outl(omr, DE4X5_OMR); \
}

#define STOP_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr &= ~(OMR_ST|OMR_SR);\
    outl(omr, DE4X5_OMR); \
}

#define RESET_SIA outl(0, DE4X5_SICR);

#define DE4X5_AUTOSENSE_MS 250

struct de4x5_srom {
    char sub_vendor_id[2];
    char sub_system_id[2];
    char reserved[12];
    char id_block_crc;
    char reserved2;
    char version;
    char num_controllers;
    char ieee_addr[6];
    char info[100];
    short chksum;
};
#define SUB_VENDOR_ID 0x500a

#define NUM_RX_DESC 8
#define NUM_TX_DESC 32
#define RX_BUFF_SZ 1536

struct de4x5_desc {
    volatile __le32 status;
    __le32 des1;
    __le32 buf;
    __le32 next;
    DESC_ALIGN
};
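
/*
** The descriptor above mirrors the DC21x4x hardware descriptor as the chip
** sees it (all fields little endian): a status/ownership word, a control
** and byte-count word (des1), a buffer address and a second address word,
** named 'next', which this driver leaves at zero since the rings are
** closed with the RD_RER/TD_TER end-of-ring bits instead of chaining.
** DESC_ALIGN is defined empty above, so no padding is added here.
*/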

#define DE4X5_PKT_STAT_SZ 16
#define DE4X5_PKT_BIN_SZ 128

struct pkt_stats {
    u_int bins[DE4X5_PKT_STAT_SZ];
    u_int unicast;
    u_int multicast;
    u_int broadcast;
    u_int excessive_collisions;
    u_int tx_underruns;
    u_int excessive_underruns;
    u_int rx_runt_frames;
    u_int rx_collision;
    u_int rx_dribble;
    u_int rx_overflow;
};

struct de4x5_private {
    char adapter_name[80];
    u_long interrupt;
    struct de4x5_desc *rx_ring;
    struct de4x5_desc *tx_ring;
    struct sk_buff *tx_skb[NUM_TX_DESC];
    struct sk_buff *rx_skb[NUM_RX_DESC];
    int rx_new, rx_old;
    int tx_new, tx_old;
    char setup_frame[SETUP_FRAME_LEN];
    char frame[64];
    spinlock_t lock;
    struct net_device_stats stats;
    struct pkt_stats pktStats;
    char rxRingSize;
    char txRingSize;
    int bus;
    int bus_num;
    int device;
    int state;
    int chipset;
    s32 irq_mask;
    s32 irq_en;
    int media;
    int c_media;
    bool fdx;
    int linkOK;
    int autosense;
    bool tx_enable;
    int setup_f;
    int local_state;
    struct mii_phy phy[DE4X5_MAX_PHY];
    struct sia_phy sia;
    int active;
    int mii_cnt;
    int timeout;
    struct timer_list timer;
    int tmp;
    struct {
        u_long lock;
        s32 csr0;
        s32 csr6;
        s32 csr7;
        s32 gep;
        s32 gepc;
        s32 csr13;
        s32 csr14;
        s32 csr15;
        int save_cnt;
        struct sk_buff_head queue;
    } cache;
    struct de4x5_srom srom;
    int cfrv;
    int rx_ovf;
    bool useSROM;
    bool useMII;
    int asBitValid;
    int asPolarity;
    int asBit;
    int defMedium;
    int tcount;
    int infoblock_init;
    int infoleaf_offset;
    s32 infoblock_csr6;
    int infoblock_media;
    int (*infoleaf_fn)(struct net_device *);
    u_char *rst;
    u_char ibn;
    struct parameters params;
    struct device *gendev;
    dma_addr_t dma_rings;
    int dma_size;
    char *rx_bufs;
};

static struct {
    int chipset;
    int bus;
    int irq;
    u_char addr[ETH_ALEN];
} last = {0,};

#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
                        lp->tx_old+lp->txRingSize-lp->tx_new-1:\
                        lp->tx_old -lp->tx_new-1)

#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
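
/*
** A worked example of the ring arithmetic above, assuming the default
** txRingSize of 32 (NUM_TX_DESC):
**
**   tx_old == tx_new == 5     ->  5 + 32 - 5 - 1  ==  31 buffers free
**   tx_old == 5, tx_new == 4  ->  5 - 4 - 1       ==   0 buffers free
**
** i.e. one slot is always sacrificed so that a completely full ring can
** be distinguished from an empty one.
*/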

static int de4x5_open(struct net_device *dev);
static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
                                   struct net_device *dev);
static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
static int de4x5_close(struct net_device *dev);
static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
static void set_multicast_list(struct net_device *dev);
static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
static int de4x5_init(struct net_device *dev);
static int de4x5_sw_reset(struct net_device *dev);
static int de4x5_rx(struct net_device *dev);
static int de4x5_tx(struct net_device *dev);
static void de4x5_ast(struct net_device *dev);
static int de4x5_txur(struct net_device *dev);
static int de4x5_rx_ovfc(struct net_device *dev);

static int autoconf_media(struct net_device *dev);
static void create_packet(struct net_device *dev, char *frame, int len);
static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
static int dc21040_autoconf(struct net_device *dev);
static int dc21041_autoconf(struct net_device *dev);
static int dc21140m_autoconf(struct net_device *dev);
static int dc2114x_autoconf(struct net_device *dev);
static int srom_autoconf(struct net_device *dev);
static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
static int test_for_100Mb(struct net_device *dev, int msec);
static int wait_for_link(struct net_device *dev);
static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
static int is_spd_100(struct net_device *dev);
static int is_100_up(struct net_device *dev);
static int is_10_up(struct net_device *dev);
static int is_anc_capable(struct net_device *dev);
static int ping_media(struct net_device *dev, int msec);
static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
static void de4x5_free_rx_buffs(struct net_device *dev);
static void de4x5_free_tx_buffs(struct net_device *dev);
static void de4x5_save_skbs(struct net_device *dev);
static void de4x5_rst_desc_ring(struct net_device *dev);
static void de4x5_cache_state(struct net_device *dev, int flag);
static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
static struct sk_buff *de4x5_get_cache(struct net_device *dev);
static void de4x5_setup_intr(struct net_device *dev);
static void de4x5_init_connection(struct net_device *dev);
static int de4x5_reset_phy(struct net_device *dev);
static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
static int PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
static short srom_rd(u_long address, u_char offset);
static void srom_latch(u_int command, u_long address);
static void srom_command(u_int command, u_long address);
static void srom_address(u_int command, u_long address, u_char offset);
static short srom_data(u_int command, u_long address);

static void sendto_srom(u_int command, u_long addr);
static int getfrom_srom(u_long addr);
static int srom_map_media(struct net_device *dev);
static int srom_infoleaf_info(struct net_device *dev);
static void srom_init(struct net_device *dev);
static void srom_exec(struct net_device *dev, u_char *p);
static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
static int mii_rdata(u_long ioaddr);
static void mii_wdata(int data, int len, u_long ioaddr);
static void mii_ta(u_long rw, u_long ioaddr);
static int mii_swap(int data, int len);
static void mii_address(u_char addr, u_long ioaddr);
static void sendto_mii(u32 command, int data, u_long ioaddr);
static int getfrom_mii(u32 command, u_long ioaddr);
static int mii_get_oui(u_char phyaddr, u_long ioaddr);
static int mii_get_phy(struct net_device *dev);
static void SetMulticastFilter(struct net_device *dev);
static int get_hw_addr(struct net_device *dev);
static void srom_repair(struct net_device *dev, int card);
static int test_bad_enet(struct net_device *dev, int status);
static int an_exception(struct de4x5_private *lp);
static char *build_setup_frame(struct net_device *dev, int mode);
static void disable_ast(struct net_device *dev);
static long de4x5_switch_mac_port(struct net_device *dev);
static int gep_rd(struct net_device *dev);
static void gep_wr(s32 data, struct net_device *dev);
static void yawn(struct net_device *dev, int state);
static void de4x5_parse_params(struct net_device *dev);
static void de4x5_dbg_open(struct net_device *dev);
static void de4x5_dbg_mii(struct net_device *dev, int k);
static void de4x5_dbg_media(struct net_device *dev);
static void de4x5_dbg_srom(struct de4x5_srom *p);
static void de4x5_dbg_rx(struct sk_buff *skb, int len);
static int de4x5_strncmp(char *a, char *b, int n);
static int dc21041_infoleaf(struct net_device *dev);
static int dc21140_infoleaf(struct net_device *dev);
static int dc21142_infoleaf(struct net_device *dev);
static int dc21143_infoleaf(struct net_device *dev);
static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);

static int io=0x0;

module_param(io, int, 0);
module_param(de4x5_debug, int, 0);
module_param(dec_only, int, 0);
module_param(args, charp, 0);

MODULE_PARM_DESC(io, "de4x5 I/O base address");
MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
MODULE_LICENSE("GPL");

struct InfoLeaf {
    int chipset;
    int (*fn)(struct net_device *);
};
static struct InfoLeaf infoleaf_array[] = {
    {DC21041, dc21041_infoleaf},
    {DC21140, dc21140_infoleaf},
    {DC21142, dc21142_infoleaf},
    {DC21143, dc21143_infoleaf}
};
#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)

static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
    type0_infoblock,
    type1_infoblock,
    type2_infoblock,
    type3_infoblock,
    type4_infoblock,
    type5_infoblock,
    compact_infoblock
};

#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)

#define RESET_DE4X5 {\
    int i;\
    i=inl(DE4X5_BMR);\
    mdelay(1);\
    outl(i | BMR_SWR, DE4X5_BMR);\
    mdelay(1);\
    outl(i, DE4X5_BMR);\
    mdelay(1);\
    for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
    mdelay(1);\
}
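
/*
** The reset sequence above is deliberately conservative: the current bus
** mode register value is read back, the software reset bit (BMR_SWR) is
** pulsed, the original value is restored, and the register is then read a
** few more times with 1ms delays to let the chip settle before any other
** CSR access.
*/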

#define PHY_HARD_RESET {\
    outl(GEP_HRST, DE4X5_GEP); \
    mdelay(1); \
    outl(0x00, DE4X5_GEP);\
    mdelay(2); \
}

static const struct net_device_ops de4x5_netdev_ops = {
    .ndo_open           = de4x5_open,
    .ndo_stop           = de4x5_close,
    .ndo_start_xmit     = de4x5_queue_pkt,
    .ndo_get_stats      = de4x5_get_stats,
    .ndo_set_rx_mode    = set_multicast_list,
    .ndo_do_ioctl       = de4x5_ioctl,
    .ndo_change_mtu     = eth_change_mtu,
    .ndo_set_mac_address= eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};


static int
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
    char name[DE4X5_NAME_LENGTH + 1];
    struct de4x5_private *lp = netdev_priv(dev);
    struct pci_dev *pdev = NULL;
    int i, status=0;

    dev_set_drvdata(gendev, dev);

    if (lp->bus == EISA) {
        outb(WAKEUP, PCI_CFPM);
    } else {
        pdev = to_pci_dev (gendev);
        pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
    }
    mdelay(10);

    RESET_DE4X5;

    if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
        return -ENXIO;
    }

    lp->useSROM = false;
    if (lp->bus == PCI) {
        PCI_signature(name, lp);
    } else {
        EISA_signature(name, gendev);
    }

    if (*name == '\0') {
        return -ENXIO;
    }

    dev->base_addr = iobase;
    printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);

    status = get_hw_addr(dev);
    printk(", h/w address %pM\n", dev->dev_addr);

    if (status != 0) {
        printk(" which has an Ethernet PROM CRC error.\n");
        return -ENXIO;
    } else {
        skb_queue_head_init(&lp->cache.queue);
        lp->cache.gepc = GEP_INIT;
        lp->asBit = GEP_SLNK;
        lp->asPolarity = GEP_SLNK;
        lp->asBitValid = ~0;
        lp->timeout = -1;
        lp->gendev = gendev;
        spin_lock_init(&lp->lock);
        init_timer(&lp->timer);
        lp->timer.function = (void (*)(unsigned long))de4x5_ast;
        lp->timer.data = (unsigned long)dev;
        de4x5_parse_params(dev);

        lp->autosense = lp->params.autosense;
        if (lp->chipset != DC21140) {
            if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
                lp->params.autosense = TP;
            }
            if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
                lp->params.autosense = BNC;
            }
        }
        lp->fdx = lp->params.fdx;
        sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));

        lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
        lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
#endif
        lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
                                         &lp->dma_rings, GFP_ATOMIC);
        if (lp->rx_ring == NULL) {
            return -ENOMEM;
        }

        lp->tx_ring = lp->rx_ring + NUM_RX_DESC;

#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
        for (i=0; i<NUM_RX_DESC; i++) {
            lp->rx_ring[i].status = 0;
            lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
            lp->rx_ring[i].buf = 0;
            lp->rx_ring[i].next = 0;
            lp->rx_skb[i] = (struct sk_buff *) 1;
        }

#else
        {
            dma_addr_t dma_rx_bufs;

            dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
                          * sizeof(struct de4x5_desc);
            dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
            lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
                          + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
            for (i=0; i<NUM_RX_DESC; i++) {
                lp->rx_ring[i].status = 0;
                lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
                lp->rx_ring[i].buf =
                        cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
                lp->rx_ring[i].next = 0;
                lp->rx_skb[i] = (struct sk_buff *) 1;
            }

        }
#endif
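
        /*
        ** At this point the single coherent allocation is laid out as
        ** [NUM_RX_DESC rx descriptors][NUM_TX_DESC tx descriptors] and, on
        ** Alpha, PowerPC, SPARC or when DE4X5_DO_MEMCPY is defined, an
        ** additional DE4X5_ALIGN-aligned block of NUM_RX_DESC receive
        ** buffers of RX_BUFF_SZ bytes each.
        */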

        barrier();

        lp->rxRingSize = NUM_RX_DESC;
        lp->txRingSize = NUM_TX_DESC;

        lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
        lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);

        outl(lp->dma_rings, DE4X5_RRBA);
        outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
             DE4X5_TRBA);

        lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
        lp->irq_en = IMR_NIM | IMR_AIM;

        create_packet(dev, lp->frame, sizeof(lp->frame));

        i = lp->cfrv & 0x000000fe;
        if ((lp->chipset == DC21140) && (i == 0x20)) {
            lp->rx_ovf = 1;
        }

        if (lp->useSROM) {
            lp->state = INITIALISED;
            if (srom_infoleaf_info(dev)) {
                dma_free_coherent (gendev, lp->dma_size,
                                   lp->rx_ring, lp->dma_rings);
                return -ENXIO;
            }
            srom_init(dev);
        }

        lp->state = CLOSED;

        if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
            mii_get_phy(dev);
        }

        printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
               ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
    }

    if (de4x5_debug & DEBUG_VERSION) {
        printk(version);
    }

    SET_NETDEV_DEV(dev, gendev);
    dev->netdev_ops = &de4x5_netdev_ops;
    dev->mem_start = 0;

    if ((status = register_netdev (dev))) {
        dma_free_coherent (gendev, lp->dma_size,
                           lp->rx_ring, lp->dma_rings);
        return status;
    }

    yawn(dev, SLEEP);

    return status;
}


static int
de4x5_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, status = 0;
    s32 omr;

    for (i=0; i<lp->rxRingSize; i++) {
        if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
            de4x5_free_rx_buffs(dev);
            return -EAGAIN;
        }
    }

    yawn(dev, WAKEUP);

    status = de4x5_init(dev);
    spin_lock_init(&lp->lock);
    lp->state = OPEN;
    de4x5_dbg_open(dev);

    if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
                    lp->adapter_name, dev)) {
        printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
        if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
                        lp->adapter_name, dev)) {
            printk("\n Cannot get IRQ- reconfigure your hardware.\n");
            disable_ast(dev);
            de4x5_free_rx_buffs(dev);
            de4x5_free_tx_buffs(dev);
            yawn(dev, SLEEP);
            lp->state = CLOSED;
            return -EAGAIN;
        } else {
            printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
            printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
        }
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    netif_trans_update(dev);

    START_DE4X5;

    de4x5_setup_intr(dev);

    if (de4x5_debug & DEBUG_OPEN) {
        printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
        printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
        printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
        printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
        printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
        printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
        printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
        printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
    }

    return status;
}


static int
de4x5_init(struct net_device *dev)
{
    netif_stop_queue(dev);

    de4x5_sw_reset(dev);

    autoconf_media(dev);

    return 0;
}

static int
de4x5_sw_reset(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 bmr, omr;

    if (!lp->useSROM) {
        if (lp->phy[lp->active].id != 0) {
            lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
        } else {
            lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
        }
        de4x5_switch_mac_port(dev);
    }

    bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
    bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
    outl(bmr, DE4X5_BMR);

    omr = inl(DE4X5_OMR) & ~OMR_PR;
    if (lp->chipset == DC21140) {
        omr |= (OMR_SDP | OMR_SB);
    }
    lp->setup_f = PERFECT;
    outl(lp->dma_rings, DE4X5_RRBA);
    outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
         DE4X5_TRBA);

    lp->rx_new = lp->rx_old = 0;
    lp->tx_new = lp->tx_old = 0;

    for (i = 0; i < lp->rxRingSize; i++) {
        lp->rx_ring[i].status = cpu_to_le32(R_OWN);
    }

    for (i = 0; i < lp->txRingSize; i++) {
        lp->tx_ring[i].status = cpu_to_le32(0);
    }

    barrier();

    SetMulticastFilter(dev);

    load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
    outl(omr|OMR_ST, DE4X5_OMR);

    for (j=0, i=0;(i<500) && (j==0);i++) {
        mdelay(1);
        if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
    }
    outl(omr, DE4X5_OMR);

    if (j == 0) {
        printk("%s: Setup frame timed out, status %08x\n", dev->name,
               inl(DE4X5_STS));
        status = -EIO;
    }

    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
    lp->tx_old = lp->tx_new;

    return status;
}


static netdev_tx_t
de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_long flags = 0;

    netif_stop_queue(dev);
    if (!lp->tx_enable)
        return NETDEV_TX_LOCKED;

    spin_lock_irqsave(&lp->lock, flags);
    de4x5_tx(dev);
    spin_unlock_irqrestore(&lp->lock, flags);

    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
        return NETDEV_TX_LOCKED;

    if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
        if (lp->interrupt) {
            de4x5_putb_cache(dev, skb);
        } else {
            de4x5_put_cache(dev, skb);
        }
        if (de4x5_debug & DEBUG_TX) {
            printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
        }
    } else if (skb->len > 0) {

        if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
            de4x5_put_cache(dev, skb);
            skb = de4x5_get_cache(dev);
        }

        while (skb && !netif_queue_stopped(dev) &&
               (u_long) lp->tx_skb[lp->tx_new] <= 1) {
            spin_lock_irqsave(&lp->lock, flags);
            netif_stop_queue(dev);
            load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
            lp->stats.tx_bytes += skb->len;
            outl(POLL_DEMAND, DE4X5_TPD);

            lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;

            if (TX_BUFFS_AVAIL) {
                netif_start_queue(dev);
            }
            skb = de4x5_get_cache(dev);
            spin_unlock_irqrestore(&lp->lock, flags);
        }
        if (skb) de4x5_putb_cache(dev, skb);
    }

    lp->cache.lock = 0;

    return NETDEV_TX_OK;
}
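
/*
** Transmit packets that cannot be placed in the ring immediately (ring
** full, a stale skb in the next slot, or the cache lock already held) are
** parked on lp->cache.queue via de4x5_put_cache()/de4x5_putb_cache() and
** are drained either by the while loop above or from the interrupt
** handler once descriptors have been freed.
*/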

static irqreturn_t
de4x5_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct de4x5_private *lp;
    s32 imr, omr, sts, limit;
    u_long iobase;
    unsigned int handled = 0;

    lp = netdev_priv(dev);
    spin_lock(&lp->lock);
    iobase = dev->base_addr;

    DISABLE_IRQs;

    if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    synchronize_irq(dev->irq);

    for (limit=0; limit<8; limit++) {
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);

        if (!(sts & lp->irq_mask)) break;
        handled = 1;

        if (sts & (STS_RI | STS_RU))
            de4x5_rx(dev);

        if (sts & (STS_TI | STS_TU))
            de4x5_tx(dev);

        if (sts & STS_LNF) {
            lp->irq_mask &= ~IMR_LFM;
        }

        if (sts & STS_UNF) {
            de4x5_txur(dev);
        }

        if (sts & STS_SE) {
            STOP_DE4X5;
            printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
                   dev->name, sts);
            spin_unlock(&lp->lock);
            return IRQ_HANDLED;
        }
    }

    if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
        while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
            de4x5_queue_pkt(de4x5_get_cache(dev), dev);
        }
        lp->cache.lock = 0;
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    ENABLE_IRQs;
    spin_unlock(&lp->lock);

    return IRQ_RETVAL(handled);
}

static int
de4x5_rx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
         entry=lp->rx_new) {
        status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

        if (lp->rx_ovf) {
            if (inl(DE4X5_MFC) & MFC_FOCM) {
                de4x5_rx_ovfc(dev);
                break;
            }
        }

        if (status & RD_FS) {
            lp->rx_old = entry;
        }

        if (status & RD_LS) {
            if (lp->tx_enable) lp->linkOK++;
            if (status & RD_ES) {
                lp->stats.rx_errors++;
                if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
                if (status & RD_CE) lp->stats.rx_crc_errors++;
                if (status & RD_OF) lp->stats.rx_fifo_errors++;
                if (status & RD_TL) lp->stats.rx_length_errors++;
                if (status & RD_RF) lp->pktStats.rx_runt_frames++;
                if (status & RD_CS) lp->pktStats.rx_collision++;
                if (status & RD_DB) lp->pktStats.rx_dribble++;
                if (status & RD_OF) lp->pktStats.rx_overflow++;
            } else {
                struct sk_buff *skb;
                short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
                                        >> 16) - 4;

                if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
                    printk("%s: Insufficient memory; nuking packet.\n",
                           dev->name);
                    lp->stats.rx_dropped++;
                } else {
                    de4x5_dbg_rx(skb, pkt_len);

                    skb->protocol=eth_type_trans(skb,dev);
                    de4x5_local_stats(dev, skb->data, pkt_len);
                    netif_rx(skb);

                    lp->stats.rx_packets++;
                    lp->stats.rx_bytes += pkt_len;
                }
            }

            for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
                lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
                barrier();
            }
            lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
            barrier();
        }

        lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
    }

    return 0;
}
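
/*
** Note how de4x5_rx() hands descriptors back to the chip: once the last
** segment (RD_LS) of a frame has been processed, every descriptor from
** the first segment (rx_old) up to and including 'entry' is returned by
** setting R_OWN, with a barrier() between writes so the chip sees the
** ownership updates in order.
*/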

static inline void
de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
{
    dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
                     le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
                     DMA_TO_DEVICE);
    if ((u_long) lp->tx_skb[entry] > 1)
        dev_kfree_skb_irq(lp->tx_skb[entry]);
    lp->tx_skb[entry] = NULL;
}


static int
de4x5_tx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
        status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
        if (status < 0) {
            break;
        } else if (status != 0x7fffffff) {
            if (status & TD_ES) {
                lp->stats.tx_errors++;
                if (status & TD_NC) lp->stats.tx_carrier_errors++;
                if (status & TD_LC) lp->stats.tx_window_errors++;
                if (status & TD_UF) lp->stats.tx_fifo_errors++;
                if (status & TD_EC) lp->pktStats.excessive_collisions++;
                if (status & TD_DE) lp->stats.tx_aborted_errors++;

                if (TX_PKT_PENDING) {
                    outl(POLL_DEMAND, DE4X5_TPD);
                }
            } else {
                lp->stats.tx_packets++;
                if (lp->tx_enable) lp->linkOK++;
            }

            lp->stats.collisions += ((status & TD_EC) ? 16 :
                                     ((status & TD_CC) >> 3));

            if (lp->tx_skb[entry] != NULL)
                de4x5_free_tx_buff(lp, entry);
        }

        lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
    }

    if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
        if (lp->interrupt)
            netif_wake_queue(dev);
        else
            netif_start_queue(dev);
    }

    return 0;
}
1742
1743static void
1744de4x5_ast(struct net_device *dev)
1745{
1746 struct de4x5_private *lp = netdev_priv(dev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766}
1767
1768static int
1769de4x5_txur(struct net_device *dev)
1770{
1771 struct de4x5_private *lp = netdev_priv(dev);
1772 u_long iobase = dev->base_addr;
1773 int omr;
1774
1775 omr = inl(DE4X5_OMR);
1776 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1777 omr &= ~(OMR_ST|OMR_SR);
1778 outl(omr, DE4X5_OMR);
1779 while (inl(DE4X5_STS) & STS_TS);
1780 if ((omr & OMR_TR) < OMR_TR) {
1781 omr += 0x4000;
1782 } else {
1783 omr |= OMR_SF;
1784 }
1785 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1786 }
1787
1788 return 0;
1789}
1790
1791static int
1792de4x5_rx_ovfc(struct net_device *dev)
1793{
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810}
1811
1812static int
1813de4x5_close(struct net_device *dev)
1814{
1815 struct de4x5_private *lp = netdev_priv(dev);
1816 u_long iobase = dev->base_addr;
1817 s32 imr, omr;
1818
1819 disable_ast(dev);
1820
1821 netif_stop_queue(dev);
1822
1823 if (de4x5_debug & DEBUG_CLOSE) {
1824 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1825 dev->name, inl(DE4X5_STS));
1826 }
1827
1828
1829
1830
1831 DISABLE_IRQs;
1832 STOP_DE4X5;
1833
1834
1835 free_irq(dev->irq, dev);
1836 lp->state = CLOSED;
1837
1838
1839 de4x5_free_rx_buffs(dev);
1840 de4x5_free_tx_buffs(dev);
1841
1842
1843 yawn(dev, SLEEP);
1844
1845 return 0;
1846}
1847
1848static struct net_device_stats *
1849de4x5_get_stats(struct net_device *dev)
1850{
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857}
1858
1859static void
1860de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861{
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++;
1882 if (lp->pktStats.bins[0] == 0) {
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895static void
1896load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1897{
1898 struct de4x5_private *lp = netdev_priv(dev);
1899 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1900 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1901
1902 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1903 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1904 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1905 lp->tx_skb[lp->tx_new] = skb;
1906 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1907 barrier();
1908
1909 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1910 barrier();
1911}
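
/*
** The ordering in load_packet() matters: the buffer address and control
** bits (des1) are written first, the previous descriptor's interrupt-on-
** completion bit (TD_IC) is cleared, and only after a barrier() is T_OWN
** set, so that the chip never sees a half-built descriptor.
*/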
1912
1913
1914
1915
1916static void
1917set_multicast_list(struct net_device *dev)
1918{
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) {
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD);
1936 netif_trans_update(dev);
1937 }
1938 }
1939}
1940
1941
1942
1943
1944
1945
1946static void
1947SetMulticastFilter(struct net_device *dev)
1948{
1949 struct de4x5_private *lp = netdev_priv(dev);
1950 struct netdev_hw_addr *ha;
1951 u_long iobase = dev->base_addr;
1952 int i, bit, byte;
1953 u16 hashcode;
1954 u32 omr, crc;
1955 char *pa;
1956 unsigned char *addrs;
1957
1958 omr = inl(DE4X5_OMR);
1959 omr &= ~(OMR_PR | OMR_PM);
1960 pa = build_setup_frame(dev, ALL);
1961
1962 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1963 omr |= OMR_PM;
1964 } else if (lp->setup_f == HASH_PERF) {
1965 netdev_for_each_mc_addr(ha, dev) {
1966 crc = ether_crc_le(ETH_ALEN, ha->addr);
1967 hashcode = crc & HASH_BITS;
1968
1969 byte = hashcode >> 3;
1970 bit = 1 << (hashcode & 0x07);
1971
1972 byte <<= 1;
1973 if (byte & 0x02) {
1974 byte -= 1;
1975 }
1976 lp->setup_frame[byte] |= bit;
1977 }
1978 } else {
1979 netdev_for_each_mc_addr(ha, dev) {
1980 addrs = ha->addr;
1981 for (i=0; i<ETH_ALEN; i++) {
1982 *(pa + (i&1)) = *addrs++;
1983 if (i & 0x01) pa += 4;
1984 }
1985 }
1986 }
1987 outl(omr, DE4X5_OMR);
1988}
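
/*
** In HASH_PERF mode the filter above is built by taking the little-endian
** CRC-32 of each multicast address, masking it with HASH_BITS to obtain a
** bit index into the hash table carried in the setup frame, and then
** remapping that index onto the setup frame's word layout (the byte<<=1 /
** byte-=1 shuffle, since only the low 16 bits of each 32-bit setup word
** are used).  Otherwise, perfect filtering copies each address directly
** into the setup frame returned by build_setup_frame(); more than 14
** multicast addresses or IFF_ALLMULTI fall back to the pass-all-multicast
** bit (OMR_PM).
*/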
1989
1990#ifdef CONFIG_EISA
1991
1992static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
1994static int __init de4x5_eisa_probe (struct device *gendev)
1995{
1996 struct eisa_device *edev;
1997 u_long iobase;
1998 u_char irq, regval;
1999 u_short vendor;
2000 u32 cfid;
2001 int status, device;
2002 struct net_device *dev;
2003 struct de4x5_private *lp;
2004
2005 edev = to_eisa_device (gendev);
2006 iobase = edev->base_addr;
2007
2008 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2009 return -EBUSY;
2010
2011 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2012 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2013 status = -EBUSY;
2014 goto release_reg_1;
2015 }
2016
2017 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2018 status = -ENOMEM;
2019 goto release_reg_2;
2020 }
2021 lp = netdev_priv(dev);
2022
2023 cfid = (u32) inl(PCI_CFID);
2024 lp->cfrv = (u_short) inl(PCI_CFRV);
2025 device = (cfid >> 8) & 0x00ffff00;
2026 vendor = (u_short) cfid;
2027
2028
2029 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2030#ifdef CONFIG_ALPHA
2031
2032
2033
2034
2035
2036
2037 outb (ER1_IAM | 1, EISA_REG1);
2038 mdelay (1);
2039
2040
2041 outb (ER1_IAM, EISA_REG1);
2042 mdelay (1);
2043
2044
2045 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2046
2047
2048 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2049#endif
2050 irq = de4x5_irq[(regval >> 1) & 0x03];
2051
2052 if (is_DC2114x) {
2053 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2054 }
2055 lp->chipset = device;
2056 lp->bus = EISA;
2057
2058
2059 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2060 outl(0x00006000, PCI_CFLT);
2061 outl(iobase, PCI_CBIO);
2062
2063 DevicePresent(dev, EISA_APROM);
2064
2065 dev->irq = irq;
2066
2067 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2068 return 0;
2069 }
2070
2071 free_netdev (dev);
2072 release_reg_2:
2073 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2074 release_reg_1:
2075 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2076
2077 return status;
2078}
2079
2080static int de4x5_eisa_remove(struct device *device)
2081{
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094}
2095
2096static struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 },
2098 { "" }
2099};
2100MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = de4x5_eisa_remove,
2108 }
2109};
2110MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2111#endif
2112
2113#ifdef CONFIG_PCI
2114
2115
2116
2117
2118
2119
2120
2121static void
2122srom_search(struct net_device *dev, struct pci_dev *pdev)
2123{
2124 u_char pb;
2125 u_short vendor, status;
2126 u_int irq = 0, device;
2127 u_long iobase = 0;
2128 int i, j;
2129 struct de4x5_private *lp = netdev_priv(dev);
2130 struct pci_dev *this_dev;
2131
2132 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2133 vendor = this_dev->vendor;
2134 device = this_dev->device << 8;
2135 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2136
2137
2138 pb = this_dev->bus->number;
2139
2140
2141 lp->device = PCI_SLOT(this_dev->devfn);
2142 lp->bus_num = pb;
2143
2144
2145 if (is_DC2114x) {
2146 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2147 ? DC21142 : DC21143);
2148 }
2149 lp->chipset = device;
2150
2151
2152 iobase = pci_resource_start(this_dev, 0);
2153
2154
2155 irq = this_dev->irq;
2156 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2157
2158
2159 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2160 if (!(status & PCI_COMMAND_IO)) continue;
2161
2162
2163 DevicePresent(dev, DE4X5_APROM);
2164 for (j=0, i=0; i<ETH_ALEN; i++) {
2165 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2166 }
2167 if (j != 0 && j != 6 * 0xff) {
2168 last.chipset = device;
2169 last.bus = pb;
2170 last.irq = irq;
2171 for (i=0; i<ETH_ALEN; i++) {
2172 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2173 }
2174 return;
2175 }
2176 }
2177}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195static int de4x5_pci_probe(struct pci_dev *pdev,
2196 const struct pci_device_id *ent)
2197{
2198 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2199 u_short vendor, status;
2200 u_int irq = 0, device;
2201 u_long iobase = 0;
2202 int error;
2203 struct net_device *dev;
2204 struct de4x5_private *lp;
2205
2206 dev_num = PCI_SLOT(pdev->devfn);
2207 pb = pdev->bus->number;
2208
2209 if (io) {
2210 pbus = (u_short)(io >> 8);
2211 dnum = (u_short)(io & 0xff);
2212 if ((pbus != pb) || (dnum != dev_num))
2213 return -ENODEV;
2214 }
2215
2216 vendor = pdev->vendor;
2217 device = pdev->device << 8;
2218 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2219 return -ENODEV;
2220
2221
2222 if ((error = pci_enable_device (pdev)))
2223 return error;
2224
2225 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2226 error = -ENOMEM;
2227 goto disable_dev;
2228 }
2229
2230 lp = netdev_priv(dev);
2231 lp->bus = PCI;
2232 lp->bus_num = 0;
2233
2234
2235 if (lp->bus_num != pb) {
2236 lp->bus_num = pb;
2237 srom_search(dev, pdev);
2238 }
2239
2240
2241 lp->cfrv = pdev->revision;
2242
2243
2244 lp->device = dev_num;
2245 lp->bus_num = pb;
2246
2247
2248 if (is_DC2114x) {
2249 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2250 }
2251 lp->chipset = device;
2252
2253
2254 iobase = pci_resource_start(pdev, 0);
2255
2256
2257 irq = pdev->irq;
2258 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2259 error = -ENODEV;
2260 goto free_dev;
2261 }
2262
2263
2264 pci_read_config_word(pdev, PCI_COMMAND, &status);
2265#ifdef __powerpc__
2266 if (!(status & PCI_COMMAND_IO)) {
2267 status |= PCI_COMMAND_IO;
2268 pci_write_config_word(pdev, PCI_COMMAND, status);
2269 pci_read_config_word(pdev, PCI_COMMAND, &status);
2270 }
2271#endif
2272 if (!(status & PCI_COMMAND_IO)) {
2273 error = -ENODEV;
2274 goto free_dev;
2275 }
2276
2277 if (!(status & PCI_COMMAND_MASTER)) {
2278 status |= PCI_COMMAND_MASTER;
2279 pci_write_config_word(pdev, PCI_COMMAND, status);
2280 pci_read_config_word(pdev, PCI_COMMAND, &status);
2281 }
2282 if (!(status & PCI_COMMAND_MASTER)) {
2283 error = -ENODEV;
2284 goto free_dev;
2285 }
2286
2287
2288 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2289 if (timer < 0x60) {
2290 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2291 }
2292
2293 DevicePresent(dev, DE4X5_APROM);
2294
2295 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2296 error = -EBUSY;
2297 goto free_dev;
2298 }
2299
2300 dev->irq = irq;
2301
2302 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2303 goto release;
2304 }
2305
2306 return 0;
2307
2308 release:
2309 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2310 free_dev:
2311 free_netdev (dev);
2312 disable_dev:
2313 pci_disable_device (pdev);
2314 return error;
2315}
2316
2317static void de4x5_pci_remove(struct pci_dev *pdev)
2318{
2319 struct net_device *dev;
2320 u_long iobase;
2321
2322 dev = dev_get_drvdata(&pdev->dev);
2323 iobase = dev->base_addr;
2324
2325 unregister_netdev (dev);
2326 free_netdev (dev);
2327 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2328 pci_disable_device (pdev);
2329}
2330
2331static struct pci_device_id de4x5_pci_tbl[] = {
2332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2336 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2337 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2338 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2339 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2340 { },
2341};
2342
2343static struct pci_driver de4x5_pci_driver = {
2344 .name = "de4x5",
2345 .id_table = de4x5_pci_tbl,
2346 .probe = de4x5_pci_probe,
2347 .remove = de4x5_pci_remove,
2348};
2349
2350#endif
2351
2352
2353
2354
2355
2356
2357
2358
2359static int
2360autoconf_media(struct net_device *dev)
2361{
2362 struct de4x5_private *lp = netdev_priv(dev);
2363 u_long iobase = dev->base_addr;
2364
2365 disable_ast(dev);
2366
2367 lp->c_media = AUTO;
2368 inl(DE4X5_MFC);
2369 lp->media = INIT;
2370 lp->tcount = 0;
2371
2372 de4x5_ast(dev);
2373
2374 return lp->media;
2375}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389static int
2390dc21040_autoconf(struct net_device *dev)
2391{
2392 struct de4x5_private *lp = netdev_priv(dev);
2393 u_long iobase = dev->base_addr;
2394 int next_tick = DE4X5_AUTOSENSE_MS;
2395 s32 imr;
2396
2397 switch (lp->media) {
2398 case INIT:
2399 DISABLE_IRQs;
2400 lp->tx_enable = false;
2401 lp->timeout = -1;
2402 de4x5_save_skbs(dev);
2403 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2404 lp->media = TP;
2405 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2406 lp->media = BNC_AUI;
2407 } else if (lp->autosense == EXT_SIA) {
2408 lp->media = EXT_SIA;
2409 } else {
2410 lp->media = NC;
2411 }
2412 lp->local_state = 0;
2413 next_tick = dc21040_autoconf(dev);
2414 break;
2415
2416 case TP:
2417 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2418 TP_SUSPECT, test_tp);
2419 break;
2420
2421 case TP_SUSPECT:
2422 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2423 break;
2424
2425 case BNC:
2426 case AUI:
2427 case BNC_AUI:
2428 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2429 BNC_AUI_SUSPECT, ping_media);
2430 break;
2431
2432 case BNC_AUI_SUSPECT:
2433 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2434 break;
2435
2436 case EXT_SIA:
2437 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2438 NC, EXT_SIA_SUSPECT, ping_media);
2439 break;
2440
2441 case EXT_SIA_SUSPECT:
2442 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2443 break;
2444
2445 case NC:
2446
2447 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2448 if (lp->media != lp->c_media) {
2449 de4x5_dbg_media(dev);
2450 lp->c_media = lp->media;
2451 }
2452 lp->media = INIT;
2453 lp->tx_enable = false;
2454 break;
2455 }
2456
2457 return next_tick;
2458}
2459
2460static int
2461dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2462 int next_state, int suspect_state,
2463 int (*fn)(struct net_device *, int))
2464{
2465 struct de4x5_private *lp = netdev_priv(dev);
2466 int next_tick = DE4X5_AUTOSENSE_MS;
2467 int linkBad;
2468
2469 switch (lp->local_state) {
2470 case 0:
2471 reset_init_sia(dev, csr13, csr14, csr15);
2472 lp->local_state++;
2473 next_tick = 500;
2474 break;
2475
2476 case 1:
2477 if (!lp->tx_enable) {
2478 linkBad = fn(dev, timeout);
2479 if (linkBad < 0) {
2480 next_tick = linkBad & ~TIMER_CB;
2481 } else {
2482 if (linkBad && (lp->autosense == AUTO)) {
2483 lp->local_state = 0;
2484 lp->media = next_state;
2485 } else {
2486 de4x5_init_connection(dev);
2487 }
2488 }
2489 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2490 lp->media = suspect_state;
2491 next_tick = 3000;
2492 }
2493 break;
2494 }
2495
2496 return next_tick;
2497}
2498
2499static int
2500de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2501 int (*fn)(struct net_device *, int),
2502 int (*asfn)(struct net_device *))
2503{
2504 struct de4x5_private *lp = netdev_priv(dev);
2505 int next_tick = DE4X5_AUTOSENSE_MS;
2506 int linkBad;
2507
2508 switch (lp->local_state) {
2509 case 1:
2510 if (lp->linkOK) {
2511 lp->media = prev_state;
2512 } else {
2513 lp->local_state++;
2514 next_tick = asfn(dev);
2515 }
2516 break;
2517
2518 case 2:
2519 linkBad = fn(dev, timeout);
2520 if (linkBad < 0) {
2521 next_tick = linkBad & ~TIMER_CB;
2522 } else if (!linkBad) {
2523 lp->local_state--;
2524 lp->media = prev_state;
2525 } else {
2526 lp->media = INIT;
2527 lp->tcount++;
2528 }
2529 }
2530
2531 return next_tick;
2532}
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543static int
2544dc21041_autoconf(struct net_device *dev)
2545{
2546 struct de4x5_private *lp = netdev_priv(dev);
2547 u_long iobase = dev->base_addr;
2548 s32 sts, irqs, irq_mask, imr, omr;
2549 int next_tick = DE4X5_AUTOSENSE_MS;
2550
2551 switch (lp->media) {
2552 case INIT:
2553 DISABLE_IRQs;
2554 lp->tx_enable = false;
2555 lp->timeout = -1;
2556 de4x5_save_skbs(dev);
2557 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2558 lp->media = TP;
2559 } else if (lp->autosense == TP) {
2560 lp->media = TP;
2561 } else if (lp->autosense == BNC) {
2562 lp->media = BNC;
2563 } else if (lp->autosense == AUI) {
2564 lp->media = AUI;
2565 } else {
2566 lp->media = NC;
2567 }
2568 lp->local_state = 0;
2569 next_tick = dc21041_autoconf(dev);
2570 break;
2571
2572 case TP_NW:
2573 if (lp->timeout < 0) {
2574 omr = inl(DE4X5_OMR);
2575 outl(omr | OMR_FDX, DE4X5_OMR);
2576 }
2577 irqs = STS_LNF | STS_LNP;
2578 irq_mask = IMR_LFM | IMR_LPM;
2579 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2580 if (sts < 0) {
2581 next_tick = sts & ~TIMER_CB;
2582 } else {
2583 if (sts & STS_LNP) {
2584 lp->media = ANS;
2585 } else {
2586 lp->media = AUI;
2587 }
2588 next_tick = dc21041_autoconf(dev);
2589 }
2590 break;
2591
2592 case ANS:
2593 if (!lp->tx_enable) {
2594 irqs = STS_LNP;
2595 irq_mask = IMR_LPM;
2596 sts = test_ans(dev, irqs, irq_mask, 3000);
2597 if (sts < 0) {
2598 next_tick = sts & ~TIMER_CB;
2599 } else {
2600 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2601 lp->media = TP;
2602 next_tick = dc21041_autoconf(dev);
2603 } else {
2604 lp->local_state = 1;
2605 de4x5_init_connection(dev);
2606 }
2607 }
2608 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2609 lp->media = ANS_SUSPECT;
2610 next_tick = 3000;
2611 }
2612 break;
2613
2614 case ANS_SUSPECT:
2615 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2616 break;
2617
2618 case TP:
2619 if (!lp->tx_enable) {
2620 if (lp->timeout < 0) {
2621 omr = inl(DE4X5_OMR);
2622 outl(omr & ~OMR_FDX, DE4X5_OMR);
2623 }
2624 irqs = STS_LNF | STS_LNP;
2625 irq_mask = IMR_LFM | IMR_LPM;
2626 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2627 if (sts < 0) {
2628 next_tick = sts & ~TIMER_CB;
2629 } else {
2630 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2631 if (inl(DE4X5_SISR) & SISR_NRA) {
2632 lp->media = AUI;
2633 } else {
2634 lp->media = BNC;
2635 }
2636 next_tick = dc21041_autoconf(dev);
2637 } else {
2638 lp->local_state = 1;
2639 de4x5_init_connection(dev);
2640 }
2641 }
2642 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2643 lp->media = TP_SUSPECT;
2644 next_tick = 3000;
2645 }
2646 break;
2647
2648 case TP_SUSPECT:
2649 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2650 break;
2651
2652 case AUI:
2653 if (!lp->tx_enable) {
2654 if (lp->timeout < 0) {
2655 omr = inl(DE4X5_OMR);
2656 outl(omr & ~OMR_FDX, DE4X5_OMR);
2657 }
2658 irqs = 0;
2659 irq_mask = 0;
2660 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2661 if (sts < 0) {
2662 next_tick = sts & ~TIMER_CB;
2663 } else {
2664 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2665 lp->media = BNC;
2666 next_tick = dc21041_autoconf(dev);
2667 } else {
2668 lp->local_state = 1;
2669 de4x5_init_connection(dev);
2670 }
2671 }
2672 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2673 lp->media = AUI_SUSPECT;
2674 next_tick = 3000;
2675 }
2676 break;
2677
2678 case AUI_SUSPECT:
2679 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2680 break;
2681
2682 case BNC:
2683 switch (lp->local_state) {
2684 case 0:
2685 if (lp->timeout < 0) {
2686 omr = inl(DE4X5_OMR);
2687 outl(omr & ~OMR_FDX, DE4X5_OMR);
2688 }
2689 irqs = 0;
2690 irq_mask = 0;
2691 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2692 if (sts < 0) {
2693 next_tick = sts & ~TIMER_CB;
2694 } else {
2695 lp->local_state++;
2696 next_tick = dc21041_autoconf(dev);
2697 }
2698 break;
2699
2700 case 1:
2701 if (!lp->tx_enable) {
2702 if ((sts = ping_media(dev, 3000)) < 0) {
2703 next_tick = sts & ~TIMER_CB;
2704 } else {
2705 if (sts) {
2706 lp->local_state = 0;
2707 lp->media = NC;
2708 } else {
2709 de4x5_init_connection(dev);
2710 }
2711 }
2712 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2713 lp->media = BNC_SUSPECT;
2714 next_tick = 3000;
2715 }
2716 break;
2717 }
2718 break;
2719
2720 case BNC_SUSPECT:
2721 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2722 break;
2723
2724 case NC:
2725 omr = inl(DE4X5_OMR);
2726 outl(omr | OMR_FDX, DE4X5_OMR);
2727 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2728 if (lp->media != lp->c_media) {
2729 de4x5_dbg_media(dev);
2730 lp->c_media = lp->media;
2731 }
2732 lp->media = INIT;
2733 lp->tx_enable = false;
2734 break;
2735 }
2736
2737 return next_tick;
2738}
2739
2740
2741
2742
2743
2744
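/*
** Autosense state machine for the DC21140[A]. Each call advances
** lp->media through INIT, ANS (MII autonegotiation), SPD_DET and the
** _100Mb/_10Mb/NC states and returns the delay in ms before the timer
** should call back. The test helpers return their polling interval
** OR'd with TIMER_CB while they still need time; that flag is masked
** off here and the remainder used as the next tick.
*/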
2745static int
2746dc21140m_autoconf(struct net_device *dev)
2747{
2748 struct de4x5_private *lp = netdev_priv(dev);
2749 int ana, anlpa, cap, cr, slnk, sr;
2750 int next_tick = DE4X5_AUTOSENSE_MS;
2751 u_long imr, omr, iobase = dev->base_addr;
2752
2753 switch(lp->media) {
2754 case INIT:
2755 if (lp->timeout < 0) {
2756 DISABLE_IRQs;
2757 lp->tx_enable = false;
2758 lp->linkOK = 0;
2759 de4x5_save_skbs(dev);
2760 }
2761 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2762 next_tick &= ~TIMER_CB;
2763 } else {
2764 if (lp->useSROM) {
2765 if (srom_map_media(dev) < 0) {
2766 lp->tcount++;
2767 return next_tick;
2768 }
2769 srom_exec(dev, lp->phy[lp->active].gep);
2770 if (lp->infoblock_media == ANS) {
2771 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2772 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2773 }
2774 } else {
2775 lp->tmp = MII_SR_ASSC;
2776 SET_10Mb;
2777 if (lp->autosense == _100Mb) {
2778 lp->media = _100Mb;
2779 } else if (lp->autosense == _10Mb) {
2780 lp->media = _10Mb;
2781 } else if ((lp->autosense == AUTO) &&
2782 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2783 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2784 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2785 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2786 lp->media = ANS;
2787 } else if (lp->autosense == AUTO) {
2788 lp->media = SPD_DET;
2789 } else if (is_spd_100(dev) && is_100_up(dev)) {
2790 lp->media = _100Mb;
2791 } else {
2792 lp->media = NC;
2793 }
2794 }
2795 lp->local_state = 0;
2796 next_tick = dc21140m_autoconf(dev);
2797 }
2798 break;
2799
2800 case ANS:
2801 switch (lp->local_state) {
2802 case 0:
2803 if (lp->timeout < 0) {
2804 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2805 }
2806 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2807 if (cr < 0) {
2808 next_tick = cr & ~TIMER_CB;
2809 } else {
2810 if (cr) {
2811 lp->local_state = 0;
2812 lp->media = SPD_DET;
2813 } else {
2814 lp->local_state++;
2815 }
2816 next_tick = dc21140m_autoconf(dev);
2817 }
2818 break;
2819
2820 case 1:
2821 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2822 next_tick = sr & ~TIMER_CB;
2823 } else {
2824 lp->media = SPD_DET;
2825 lp->local_state = 0;
2826 if (sr) {
2827 lp->tmp = MII_SR_ASSC;
2828 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2829 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2830 if (!(anlpa & MII_ANLPA_RF) &&
2831 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2832 if (cap & MII_ANA_100M) {
2833 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2834 lp->media = _100Mb;
2835 } else if (cap & MII_ANA_10M) {
2836 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2838 lp->media = _10Mb;
2839 }
2840 }
2841 }
2842 next_tick = dc21140m_autoconf(dev);
2843 }
2844 break;
2845 }
2846 break;
2847
2848 case SPD_DET:
2849 if (lp->timeout < 0) {
2850 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2851 (~gep_rd(dev) & GEP_LNP));
2852 SET_100Mb_PDET;
2853 }
2854 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2855 next_tick = slnk & ~TIMER_CB;
2856 } else {
2857 if (is_spd_100(dev) && is_100_up(dev)) {
2858 lp->media = _100Mb;
2859 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2860 lp->media = _10Mb;
2861 } else {
2862 lp->media = NC;
2863 }
2864 next_tick = dc21140m_autoconf(dev);
2865 }
2866 break;
2867
2868 case _100Mb:
2869 next_tick = 3000;
2870 if (!lp->tx_enable) {
2871 SET_100Mb;
2872 de4x5_init_connection(dev);
2873 } else {
2874 if (!lp->linkOK && (lp->autosense == AUTO)) {
2875 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2876 lp->media = INIT;
2877 lp->tcount++;
2878 next_tick = DE4X5_AUTOSENSE_MS;
2879 }
2880 }
2881 }
2882 break;
2883
2884 case BNC:
2885 case AUI:
2886 case _10Mb:
2887 next_tick = 3000;
2888 if (!lp->tx_enable) {
2889 SET_10Mb;
2890 de4x5_init_connection(dev);
2891 } else {
2892 if (!lp->linkOK && (lp->autosense == AUTO)) {
2893 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2894 lp->media = INIT;
2895 lp->tcount++;
2896 next_tick = DE4X5_AUTOSENSE_MS;
2897 }
2898 }
2899 }
2900 break;
2901
2902 case NC:
2903 if (lp->media != lp->c_media) {
2904 de4x5_dbg_media(dev);
2905 lp->c_media = lp->media;
2906 }
2907 lp->media = INIT;
2908 lp->tx_enable = false;
2909 break;
2910 }
2911
2912 return next_tick;
2913}
2914
2915
2928
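/*
** Autosense state machine for the DC21142/DC21143. Similar to the
** DC21140 version, but the media list (TP, BNC, AUI, 10/100Mb) and the
** CSR13-15/GEP values come from the SROM info blocks via
** srom_map_media(), and autonegotiation is only attempted when the
** info block asks for ANS.
*/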
2929static int
2930dc2114x_autoconf(struct net_device *dev)
2931{
2932 struct de4x5_private *lp = netdev_priv(dev);
2933 u_long iobase = dev->base_addr;
2934 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2935 int next_tick = DE4X5_AUTOSENSE_MS;
2936
2937 switch (lp->media) {
2938 case INIT:
2939 if (lp->timeout < 0) {
2940 DISABLE_IRQs;
2941 lp->tx_enable = false;
2942 lp->linkOK = 0;
2943 lp->timeout = -1;
2944 de4x5_save_skbs(dev);
2945 if (lp->params.autosense & ~AUTO) {
2946 srom_map_media(dev);
2947 if (lp->media != lp->params.autosense) {
2948 lp->tcount++;
2949 lp->media = INIT;
2950 return next_tick;
2951 }
2952 lp->media = INIT;
2953 }
2954 }
2955 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2956 next_tick &= ~TIMER_CB;
2957 } else {
2958 if (lp->autosense == _100Mb) {
2959 lp->media = _100Mb;
2960 } else if (lp->autosense == _10Mb) {
2961 lp->media = _10Mb;
2962 } else if (lp->autosense == TP) {
2963 lp->media = TP;
2964 } else if (lp->autosense == BNC) {
2965 lp->media = BNC;
2966 } else if (lp->autosense == AUI) {
2967 lp->media = AUI;
2968 } else {
2969 lp->media = SPD_DET;
2970 if ((lp->infoblock_media == ANS) &&
2971 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2972 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2973 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2974 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2975 lp->media = ANS;
2976 }
2977 }
2978 lp->local_state = 0;
2979 next_tick = dc2114x_autoconf(dev);
2980 }
2981 break;
2982
2983 case ANS:
2984 switch (lp->local_state) {
2985 case 0:
2986 if (lp->timeout < 0) {
2987 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2988 }
2989 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2990 if (cr < 0) {
2991 next_tick = cr & ~TIMER_CB;
2992 } else {
2993 if (cr) {
2994 lp->local_state = 0;
2995 lp->media = SPD_DET;
2996 } else {
2997 lp->local_state++;
2998 }
2999 next_tick = dc2114x_autoconf(dev);
3000 }
3001 break;
3002
3003 case 1:
3004 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3005 if (sr < 0) {
3006 next_tick = sr & ~TIMER_CB;
3007 } else {
3008 lp->media = SPD_DET;
3009 lp->local_state = 0;
3010 if (sr) {
3011 lp->tmp = MII_SR_ASSC;
3012 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3013 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3014 if (!(anlpa & MII_ANLPA_RF) &&
3015 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3016 if (cap & MII_ANA_100M) {
3017 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3018 lp->media = _100Mb;
3019 } else if (cap & MII_ANA_10M) {
3020 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3021 lp->media = _10Mb;
3022 }
3023 }
3024 }
3025 next_tick = dc2114x_autoconf(dev);
3026 }
3027 break;
3028 }
3029 break;
3030
3031 case AUI:
3032 if (!lp->tx_enable) {
3033 if (lp->timeout < 0) {
3034 omr = inl(DE4X5_OMR);
3035 outl(omr & ~OMR_FDX, DE4X5_OMR);
3036 }
3037 irqs = 0;
3038 irq_mask = 0;
3039 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3040 if (sts < 0) {
3041 next_tick = sts & ~TIMER_CB;
3042 } else {
3043 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3044 lp->media = BNC;
3045 next_tick = dc2114x_autoconf(dev);
3046 } else {
3047 lp->local_state = 1;
3048 de4x5_init_connection(dev);
3049 }
3050 }
3051 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3052 lp->media = AUI_SUSPECT;
3053 next_tick = 3000;
3054 }
3055 break;
3056
3057 case AUI_SUSPECT:
3058 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3059 break;
3060
3061 case BNC:
3062 switch (lp->local_state) {
3063 case 0:
3064 if (lp->timeout < 0) {
3065 omr = inl(DE4X5_OMR);
3066 outl(omr & ~OMR_FDX, DE4X5_OMR);
3067 }
3068 irqs = 0;
3069 irq_mask = 0;
3070 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3071 if (sts < 0) {
3072 next_tick = sts & ~TIMER_CB;
3073 } else {
3074 lp->local_state++;
3075 next_tick = dc2114x_autoconf(dev);
3076 }
3077 break;
3078
3079 case 1:
3080 if (!lp->tx_enable) {
3081 if ((sts = ping_media(dev, 3000)) < 0) {
3082 next_tick = sts & ~TIMER_CB;
3083 } else {
3084 if (sts) {
3085 lp->local_state = 0;
3086 lp->tcount++;
3087 lp->media = INIT;
3088 } else {
3089 de4x5_init_connection(dev);
3090 }
3091 }
3092 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3093 lp->media = BNC_SUSPECT;
3094 next_tick = 3000;
3095 }
3096 break;
3097 }
3098 break;
3099
3100 case BNC_SUSPECT:
3101 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3102 break;
3103
3104 case SPD_DET:
3105 if (srom_map_media(dev) < 0) {
3106 lp->tcount++;
3107 lp->media = INIT;
3108 return next_tick;
3109 }
3110 if (lp->media == _100Mb) {
3111 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3112 lp->media = SPD_DET;
3113 return slnk & ~TIMER_CB;
3114 }
3115 } else {
3116 if (wait_for_link(dev) < 0) {
3117 lp->media = SPD_DET;
3118 return PDET_LINK_WAIT;
3119 }
3120 }
3121 if (lp->media == ANS) {
3122 if (is_spd_100(dev)) {
3123 lp->media = _100Mb;
3124 } else {
3125 lp->media = _10Mb;
3126 }
3127 next_tick = dc2114x_autoconf(dev);
3128 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3129 (((lp->media == _10Mb) || (lp->media == TP) ||
3130 (lp->media == BNC) || (lp->media == AUI)) &&
3131 is_10_up(dev))) {
3132 next_tick = dc2114x_autoconf(dev);
3133 } else {
3134 lp->tcount++;
3135 lp->media = INIT;
3136 }
3137 break;
3138
3139 case _10Mb:
3140 next_tick = 3000;
3141 if (!lp->tx_enable) {
3142 SET_10Mb;
3143 de4x5_init_connection(dev);
3144 } else {
3145 if (!lp->linkOK && (lp->autosense == AUTO)) {
3146 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3147 lp->media = INIT;
3148 lp->tcount++;
3149 next_tick = DE4X5_AUTOSENSE_MS;
3150 }
3151 }
3152 }
3153 break;
3154
3155 case _100Mb:
3156 next_tick = 3000;
3157 if (!lp->tx_enable) {
3158 SET_100Mb;
3159 de4x5_init_connection(dev);
3160 } else {
3161 if (!lp->linkOK && (lp->autosense == AUTO)) {
3162 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3163 lp->media = INIT;
3164 lp->tcount++;
3165 next_tick = DE4X5_AUTOSENSE_MS;
3166 }
3167 }
3168 }
3169 break;
3170
3171 default:
3172 lp->tcount++;
3173 printk("Huh?: media:%02x\n", lp->media);
3174 lp->media = INIT;
3175 break;
3176 }
3177
3178 return next_tick;
3179}
3180
3181static int
3182srom_autoconf(struct net_device *dev)
3183{
3184 struct de4x5_private *lp = netdev_priv(dev);
3185
3186 return lp->infoleaf_fn(dev);
3187}
3188
3189
3190
3191
3192
3193
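/*
** Map the media code from the SROM info block onto the driver's
** internal media type and full duplex flag. Returns -1 if the user
** asked for a duplex setting the block cannot provide, or if the
** media code is unknown.
*/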
3194static int
3195srom_map_media(struct net_device *dev)
3196{
3197 struct de4x5_private *lp = netdev_priv(dev);
3198
3199 lp->fdx = false;
3200 if (lp->infoblock_media == lp->media)
3201 return 0;
3202
3203 switch(lp->infoblock_media) {
3204 case SROM_10BASETF:
3205 if (!lp->params.fdx) return -1;
3206 lp->fdx = true; /* fall through */
3207 case SROM_10BASET:
3208 if (lp->params.fdx && !lp->fdx) return -1;
3209 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3210 lp->media = _10Mb;
3211 } else {
3212 lp->media = TP;
3213 }
3214 break;
3215
3216 case SROM_10BASE2:
3217 lp->media = BNC;
3218 break;
3219
3220 case SROM_10BASE5:
3221 lp->media = AUI;
3222 break;
3223
3224 case SROM_100BASETF:
3225 if (!lp->params.fdx) return -1;
3226 lp->fdx = true; /* fall through */
3227 case SROM_100BASET:
3228 if (lp->params.fdx && !lp->fdx) return -1;
3229 lp->media = _100Mb;
3230 break;
3231
3232 case SROM_100BASET4:
3233 lp->media = _100Mb;
3234 break;
3235
3236 case SROM_100BASEFF:
3237 if (!lp->params.fdx) return -1;
3238 lp->fdx = true; /* fall through */
3239 case SROM_100BASEF:
3240 if (lp->params.fdx && !lp->fdx) return -1;
3241 lp->media = _100Mb;
3242 break;
3243
3244 case ANS:
3245 lp->media = ANS;
3246 lp->fdx = lp->params.fdx;
3247 break;
3248
3249 default:
3250 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3251 lp->infoblock_media);
3252 return -1;
3254 }
3255
3256 return 0;
3257}
3258
3259static void
3260de4x5_init_connection(struct net_device *dev)
3261{
3262 struct de4x5_private *lp = netdev_priv(dev);
3263 u_long iobase = dev->base_addr;
3264 u_long flags = 0;
3265
3266 if (lp->media != lp->c_media) {
3267 de4x5_dbg_media(dev);
3268 lp->c_media = lp->media;
3269 }
3270
3271 spin_lock_irqsave(&lp->lock, flags);
3272 de4x5_rst_desc_ring(dev);
3273 de4x5_setup_intr(dev);
3274 lp->tx_enable = true;
3275 spin_unlock_irqrestore(&lp->lock, flags);
3276 outl(POLL_DEMAND, DE4X5_TPD);
3277
3278 netif_wake_queue(dev);
3279}
3280
3281
3282
3283
3284
3285
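/*
** Reset the external PHY, either by running the SROM reset/GEP
** sequences (twice, as below) or by a hard GEP reset, then issue an
** MII soft reset and poll (via test_mii_reg()) for MII_CR_RST to
** clear.
*/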
3286static int
3287de4x5_reset_phy(struct net_device *dev)
3288{
3289 struct de4x5_private *lp = netdev_priv(dev);
3290 u_long iobase = dev->base_addr;
3291 int next_tick = 0;
3292
3293 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3294 if (lp->timeout < 0) {
3295 if (lp->useSROM) {
3296 if (lp->phy[lp->active].rst) {
3297 srom_exec(dev, lp->phy[lp->active].rst);
3298 srom_exec(dev, lp->phy[lp->active].rst);
3299 } else if (lp->rst) {
3300 srom_exec(dev, lp->rst);
3301 srom_exec(dev, lp->rst);
3302 }
3303 } else {
3304 PHY_HARD_RESET;
3305 }
3306 if (lp->useMII) {
3307 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3308 }
3309 }
3310 if (lp->useMII) {
3311 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3312 }
3313 } else if (lp->chipset == DC21140) {
3314 PHY_HARD_RESET;
3315 }
3316
3317 return next_tick;
3318}
3319
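/*
** Wait for a media-related interrupt. Programs the SIA (when the SROM
** isn't in use), unmasks the requested interrupts and clears the
** status registers, then polls CSR5 for up to msec ms. Returns
** 100 | TIMER_CB while still waiting so the caller reschedules the
** timer instead of spinning.
*/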
3320static int
3321test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3322{
3323 struct de4x5_private *lp = netdev_priv(dev);
3324 u_long iobase = dev->base_addr;
3325 s32 sts, csr12;
3326
3327 if (lp->timeout < 0) {
3328 lp->timeout = msec/100;
3329 if (!lp->useSROM) {
3330 reset_init_sia(dev, csr13, csr14, csr15);
3331 }
3332
3333
3334 outl(irq_mask, DE4X5_IMR);
3335
3336
3337 sts = inl(DE4X5_STS);
3338 outl(sts, DE4X5_STS);
3339
3340
3341 if ((lp->chipset == DC21041) || lp->useSROM) {
3342 csr12 = inl(DE4X5_SISR);
3343 outl(csr12, DE4X5_SISR);
3344 }
3345 }
3346
3347 sts = inl(DE4X5_STS) & ~TIMER_CB;
3348
3349 if (!(sts & irqs) && --lp->timeout) {
3350 sts = 100 | TIMER_CB;
3351 } else {
3352 lp->timeout = -1;
3353 }
3354
3355 return sts;
3356}
3357
3358static int
3359test_tp(struct net_device *dev, s32 msec)
3360{
3361 struct de4x5_private *lp = netdev_priv(dev);
3362 u_long iobase = dev->base_addr;
3363 int sisr;
3364
3365 if (lp->timeout < 0) {
3366 lp->timeout = msec/100;
3367 }
3368
3369 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3370
3371 if (sisr && --lp->timeout) {
3372 sisr = 100 | TIMER_CB;
3373 } else {
3374 lp->timeout = -1;
3375 }
3376
3377 return sisr;
3378}
3379
3380
3381
3382
3383
3384
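/*
** Sample for a 100Mb/s link: wait SAMPLE_DELAY ms, then check every
** SAMPLE_INTERVAL ms for a link/speed indication from the MII or SROM
** information, or from the GEP pins on boards without an MII PHY.
*/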
3385#define SAMPLE_INTERVAL 500
3386#define SAMPLE_DELAY 2000
3387static int
3388test_for_100Mb(struct net_device *dev, int msec)
3389{
3390 struct de4x5_private *lp = netdev_priv(dev);
3391 int gep = 0, ret = ((lp->chipset & ~0x00ff) == DC2114x ? -1 : GEP_SLNK);
3392
3393 if (lp->timeout < 0) {
3394 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3395 if (msec > SAMPLE_DELAY) {
3396 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3397 gep = SAMPLE_DELAY | TIMER_CB;
3398 return gep;
3399 } else {
3400 lp->timeout = msec/SAMPLE_INTERVAL;
3401 }
3402 }
3403
3404 if (lp->phy[lp->active].id || lp->useSROM) {
3405 gep = is_100_up(dev) | is_spd_100(dev);
3406 } else {
3407 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3408 }
3409 if (!(gep & ret) && --lp->timeout) {
3410 gep = SAMPLE_INTERVAL | TIMER_CB;
3411 } else {
3412 lp->timeout = -1;
3413 }
3414
3415 return gep;
3416}
3417
3418static int
3419wait_for_link(struct net_device *dev)
3420{
3421 struct de4x5_private *lp = netdev_priv(dev);
3422
3423 if (lp->timeout < 0) {
3424 lp->timeout = 1;
3425 }
3426
3427 if (lp->timeout--) {
3428 return TIMER_CB;
3429 } else {
3430 lp->timeout = -1;
3431 }
3432
3433 return 0;
3434}
3435
3436
3437
3438
3439
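/*
** Poll an MII register until the masked bits become set (pol == true)
** or clear (pol == false), or until msec ms have elapsed; re-entered
** from the timer via the usual TIMER_CB convention.
*/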
3440static int
3441test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3442{
3443 struct de4x5_private *lp = netdev_priv(dev);
3444 int test;
3445 u_long iobase = dev->base_addr;
3446
3447 if (lp->timeout < 0) {
3448 lp->timeout = msec/100;
3449 }
3450
3451 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3452 test = (reg ^ (pol ? ~0 : 0)) & mask;
3453
3454 if (test && --lp->timeout) {
3455 reg = 100 | TIMER_CB;
3456 } else {
3457 lp->timeout = -1;
3458 }
3459
3460 return reg;
3461}
3462
3463static int
3464is_spd_100(struct net_device *dev)
3465{
3466 struct de4x5_private *lp = netdev_priv(dev);
3467 u_long iobase = dev->base_addr;
3468 int spd;
3469
3470 if (lp->useMII) {
3471 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3472 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3473 spd &= lp->phy[lp->active].spd.mask;
3474 } else if (!lp->useSROM) {
3475 spd = ((~gep_rd(dev)) & GEP_SLNK);
3476 } else {
3477 if ((lp->ibn == 2) || !lp->asBitValid)
3478 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3479
3480 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3481 (lp->linkOK & ~lp->asBitValid);
3482 }
3483
3484 return spd;
3485}
3486
3487static int
3488is_100_up(struct net_device *dev)
3489{
3490 struct de4x5_private *lp = netdev_priv(dev);
3491 u_long iobase = dev->base_addr;
3492
3493 if (lp->useMII) {
3494
3495 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3496 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3497 } else if (!lp->useSROM) {
3498 return (~gep_rd(dev)) & GEP_SLNK;
3499 } else {
3500 if ((lp->ibn == 2) || !lp->asBitValid)
3501 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3502
3503 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3504 (lp->linkOK & ~lp->asBitValid);
3505 }
3506}
3507
3508static int
3509is_10_up(struct net_device *dev)
3510{
3511 struct de4x5_private *lp = netdev_priv(dev);
3512 u_long iobase = dev->base_addr;
3513
3514 if (lp->useMII) {
3515
3516 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3517 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3518 } else if (!lp->useSROM) {
3519 return (~gep_rd(dev)) & GEP_LNP;
3520 } else {
3521 if ((lp->ibn == 2) || !lp->asBitValid)
3522 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3523 (~inl(DE4X5_SISR)&SISR_LS10):
3524 0;
3525
3526 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3527 (lp->linkOK & ~lp->asBitValid);
3528 }
3529}
3530
3531static int
3532is_anc_capable(struct net_device *dev)
3533{
3534 struct de4x5_private *lp = netdev_priv(dev);
3535 u_long iobase = dev->base_addr;
3536
3537 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3538 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3539 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3540 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3541 } else {
3542 return 0;
3543 }
3544}
3545
3546
3547
3548
3549
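/*
** Check that the (BNC/AUI) media is really there by queueing the
** loopback frame in lp->frame (built by create_packet()) and watching
** the descriptor status and SISR. Returns 0 if the frame went out
** cleanly, 1 on failure, or 100 | TIMER_CB while the chip still owns
** the descriptor.
*/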
3550static int
3551ping_media(struct net_device *dev, int msec)
3552{
3553 struct de4x5_private *lp = netdev_priv(dev);
3554 u_long iobase = dev->base_addr;
3555 int sisr;
3556
3557 if (lp->timeout < 0) {
3558 lp->timeout = msec/100;
3559
3560 lp->tmp = lp->tx_new;
3561 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3562 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3563 outl(POLL_DEMAND, DE4X5_TPD);
3564 }
3565
3566 sisr = inl(DE4X5_SISR);
3567
3568 if ((!(sisr & SISR_NCR)) &&
3569 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3570 (--lp->timeout)) {
3571 sisr = 100 | TIMER_CB;
3572 } else {
3573 if ((!(sisr & SISR_NCR)) &&
3574 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3575 lp->timeout) {
3576 sisr = 0;
3577 } else {
3578 sisr = 1;
3579 }
3580 lp->timeout = -1;
3581 }
3582
3583 return sisr;
3584}
3585
3586
3587
3588
3589
3590
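/*
** Two receive buffer schemes: on platforms that can hand skb data
** straight to the chip, swap a freshly aligned skb into the ring slot
** and return the one that was just filled; on the DE4X5_DO_MEMCPY
** (and Alpha/PowerPC/SPARC) build, copy the packet out of the fixed
** receive buffers instead, handling wrap-around of the buffer area.
*/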
3591static struct sk_buff *
3592de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3593{
3594 struct de4x5_private *lp = netdev_priv(dev);
3595 struct sk_buff *p;
3596
3597#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3598 struct sk_buff *ret;
3599 u_long i=0, tmp;
3600
3601 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3602 if (!p) return NULL;
3603
3604 tmp = virt_to_bus(p->data);
3605 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3606 skb_reserve(p, i);
3607 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3608
3609 ret = lp->rx_skb[index];
3610 lp->rx_skb[index] = p;
3611
3612 if ((u_long) ret > 1) {
3613 skb_put(ret, len);
3614 }
3615
3616 return ret;
3617
3618#else
3619 if (lp->state != OPEN) return (struct sk_buff *)1;
3620
3621 p = netdev_alloc_skb(dev, len + 2);
3622 if (!p) return NULL;
3623
3624 skb_reserve(p, 2);
3625 if (index < lp->rx_old) {
3626 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3627 memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
3628 memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
3629 } else {
3630 memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
3631 }
3632
3633 return p;
3634#endif
3635}
3636
3637static void
3638de4x5_free_rx_buffs(struct net_device *dev)
3639{
3640 struct de4x5_private *lp = netdev_priv(dev);
3641 int i;
3642
3643 for (i=0; i<lp->rxRingSize; i++) {
3644 if ((u_long) lp->rx_skb[i] > 1) {
3645 dev_kfree_skb(lp->rx_skb[i]);
3646 }
3647 lp->rx_ring[i].status = 0;
3648 lp->rx_skb[i] = (struct sk_buff *)1;
3649 }
3650}
3651
3652static void
3653de4x5_free_tx_buffs(struct net_device *dev)
3654{
3655 struct de4x5_private *lp = netdev_priv(dev);
3656 int i;
3657
3658 for (i=0; i<lp->txRingSize; i++) {
3659 if (lp->tx_skb[i])
3660 de4x5_free_tx_buff(lp, i);
3661 lp->tx_ring[i].status = 0;
3662 }
3663
3664
3665 __skb_queue_purge(&lp->cache.queue);
3666}
3667
3668
3669
3670
3671
3672
3673
3674
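/*
** Media changes are made with the chip stopped: de4x5_save_skbs()
** reclaims the transmit buffers and saves CSR0/6/7 across a software
** reset, while de4x5_rst_desc_ring() later resets the descriptor rings
** and restarts the chip. The cache.queue list and the put/get helpers
** below hold skbs handed to the driver while transmission is disabled.
*/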
3675static void
3676de4x5_save_skbs(struct net_device *dev)
3677{
3678 struct de4x5_private *lp = netdev_priv(dev);
3679 u_long iobase = dev->base_addr;
3680 s32 omr;
3681
3682 if (!lp->cache.save_cnt) {
3683 STOP_DE4X5;
3684 de4x5_tx(dev);
3685 de4x5_free_tx_buffs(dev);
3686 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3687 de4x5_sw_reset(dev);
3688 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3689 lp->cache.save_cnt++;
3690 START_DE4X5;
3691 }
3692}
3693
3694static void
3695de4x5_rst_desc_ring(struct net_device *dev)
3696{
3697 struct de4x5_private *lp = netdev_priv(dev);
3698 u_long iobase = dev->base_addr;
3699 int i;
3700 s32 omr;
3701
3702 if (lp->cache.save_cnt) {
3703 STOP_DE4X5;
3704 outl(lp->dma_rings, DE4X5_RRBA);
3705 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3706 DE4X5_TRBA);
3707
3708 lp->rx_new = lp->rx_old = 0;
3709 lp->tx_new = lp->tx_old = 0;
3710
3711 for (i = 0; i < lp->rxRingSize; i++) {
3712 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3713 }
3714
3715 for (i = 0; i < lp->txRingSize; i++) {
3716 lp->tx_ring[i].status = cpu_to_le32(0);
3717 }
3718
3719 barrier();
3720 lp->cache.save_cnt--;
3721 START_DE4X5;
3722 }
3723}
3724
3725static void
3726de4x5_cache_state(struct net_device *dev, int flag)
3727{
3728 struct de4x5_private *lp = netdev_priv(dev);
3729 u_long iobase = dev->base_addr;
3730
3731 switch(flag) {
3732 case DE4X5_SAVE_STATE:
3733 lp->cache.csr0 = inl(DE4X5_BMR);
3734 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3735 lp->cache.csr7 = inl(DE4X5_IMR);
3736 break;
3737
3738 case DE4X5_RESTORE_STATE:
3739 outl(lp->cache.csr0, DE4X5_BMR);
3740 outl(lp->cache.csr6, DE4X5_OMR);
3741 outl(lp->cache.csr7, DE4X5_IMR);
3742 if (lp->chipset == DC21140) {
3743 gep_wr(lp->cache.gepc, dev);
3744 gep_wr(lp->cache.gep, dev);
3745 } else {
3746 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3747 lp->cache.csr15);
3748 }
3749 break;
3750 }
3751}
3752
3753static void
3754de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3755{
3756 struct de4x5_private *lp = netdev_priv(dev);
3757
3758 __skb_queue_tail(&lp->cache.queue, skb);
3759}
3760
3761static void
3762de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3763{
3764 struct de4x5_private *lp = netdev_priv(dev);
3765
3766 __skb_queue_head(&lp->cache.queue, skb);
3767}
3768
3769static struct sk_buff *
3770de4x5_get_cache(struct net_device *dev)
3771{
3772 struct de4x5_private *lp = netdev_priv(dev);
3773
3774 return __skb_dequeue(&lp->cache.queue);
3775}
3776
3777
3778
3779
3780
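/*
** Wait for autonegotiation to finish: polls the SISR ANS field for
** ANS_NWOK (or any of the requested status interrupts) for up to
** msec ms, using the TIMER_CB re-entry convention.
*/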
3781static int
3782test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3783{
3784 struct de4x5_private *lp = netdev_priv(dev);
3785 u_long iobase = dev->base_addr;
3786 s32 sts, ans;
3787
3788 if (lp->timeout < 0) {
3789 lp->timeout = msec/100;
3790 outl(irq_mask, DE4X5_IMR);
3791
3792
3793 sts = inl(DE4X5_STS);
3794 outl(sts, DE4X5_STS);
3795 }
3796
3797 ans = inl(DE4X5_SISR) & SISR_ANS;
3798 sts = inl(DE4X5_STS) & ~TIMER_CB;
3799
3800 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3801 sts = 100 | TIMER_CB;
3802 } else {
3803 lp->timeout = -1;
3804 }
3805
3806 return sts;
3807}
3808
3809static void
3810de4x5_setup_intr(struct net_device *dev)
3811{
3812 struct de4x5_private *lp = netdev_priv(dev);
3813 u_long iobase = dev->base_addr;
3814 s32 imr, sts;
3815
3816 if (inl(DE4X5_OMR) & OMR_SR) {
3817 imr = 0;
3818 UNMASK_IRQs;
3819 sts = inl(DE4X5_STS);
3820 outl(sts, DE4X5_STS);
3821 ENABLE_IRQs;
3822 }
3823}
3824
3825
3826
3827
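/*
** Reset the SIA and reload CSR13/14/15, either from the cached SROM
** info block values (including the GEP control and data words) or
** from the values passed in.
*/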
3828static void
3829reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3830{
3831 struct de4x5_private *lp = netdev_priv(dev);
3832 u_long iobase = dev->base_addr;
3833
3834 RESET_SIA;
3835 if (lp->useSROM) {
3836 if (lp->ibn == 3) {
3837 srom_exec(dev, lp->phy[lp->active].rst);
3838 srom_exec(dev, lp->phy[lp->active].gep);
3839 outl(1, DE4X5_SICR);
3840 return;
3841 } else {
3842 csr15 = lp->cache.csr15;
3843 csr14 = lp->cache.csr14;
3844 csr13 = lp->cache.csr13;
3845 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3846 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3847 }
3848 } else {
3849 outl(csr15, DE4X5_SIGR);
3850 }
3851 outl(csr14, DE4X5_STRR);
3852 outl(csr13, DE4X5_SICR);
3853
3854 mdelay(10);
3855}
3856
3857
3858
3859
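/*
** Build the minimal loopback frame used by ping_media(): destination
** and source are both the interface address, followed by a 2 byte
** 802.3 length field of 1.
*/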
3860static void
3861create_packet(struct net_device *dev, char *frame, int len)
3862{
3863 int i;
3864 char *buf = frame;
3865
3866 for (i=0; i<ETH_ALEN; i++) {
3867 *buf++ = dev->dev_addr[i];
3868 }
3869 for (i=0; i<ETH_ALEN; i++) {
3870 *buf++ = dev->dev_addr[i];
3871 }
3872
3873 *buf++ = 0;
3874 *buf++ = 1;
3875}
3876
3877
3878
3879
3880static int
3881EISA_signature(char *name, struct device *device)
3882{
3883 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3884 struct eisa_device *edev;
3885
3886 *name = '\0';
3887 edev = to_eisa_device (device);
3888 i = edev->id.driver_data;
3889
3890 if (i >= 0 && i < siglen) {
3891 strcpy (name, de4x5_signatures[i]);
3892 status = 1;
3893 }
3894
3895 return status;
3896}
3897
3898
3899
3900
3901static int
3902PCI_signature(char *name, struct de4x5_private *lp)
3903{
3904 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3905
3906 if (lp->chipset == DC21040) {
3907 strcpy(name, "DE434/5");
3908 return status;
3909 } else {
3910 int tmp = *((char *)&lp->srom + 19) * 3;
3911 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3912 }
3913 name[8] = '\0';
3914 for (i=0; i<siglen; i++) {
3915 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3916 }
3917 if (i == siglen) {
3918 if (dec_only) {
3919 *name = '\0';
3920 } else {
3921 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3922 ((lp->chipset == DC21041) ? "DC21041" :
3923 ((lp->chipset == DC21140) ? "DC21140" :
3924 ((lp->chipset == DC21142) ? "DC21142" :
3925 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3926 )))))));
3927 }
3928 if (lp->chipset != DC21041) {
3929 lp->useSROM = true;
3930 }
3931 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3932 lp->useSROM = true;
3933 }
3934
3935 return status;
3936}
3937
3938
3939
3940
3941
3942
3943
3944
3945
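/*
** For a DC21040 just reset the address PROM pointer (EISA boards via
** the signature search in enet_addr_rst()). For later chips read the
** hardware address words from the SROM; if they look valid (not all
** zeros or all ones) the whole SROM image is read into lp->srom for
** later decoding.
*/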
3946static void
3947DevicePresent(struct net_device *dev, u_long aprom_addr)
3948{
3949 int i, j=0;
3950 struct de4x5_private *lp = netdev_priv(dev);
3951
3952 if (lp->chipset == DC21040) {
3953 if (lp->bus == EISA) {
3954 enet_addr_rst(aprom_addr);
3955 } else {
3956 outl(0, aprom_addr);
3957 }
3958 } else {
3959 u_short tmp;
3960 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3961 for (i=0; i<(ETH_ALEN>>1); i++) {
3962 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3963 j += tmp;
3964 *p = cpu_to_le16(tmp);
3965 }
3966 if (j == 0 || j == 3 * 0xffff) {
3967
3968 return;
3969 }
3970
3971 p = (__le16 *)&lp->srom;
3972 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3973 tmp = srom_rd(aprom_addr, i);
3974 *p++ = cpu_to_le16(tmp);
3975 }
3976 de4x5_dbg_srom(&lp->srom);
3977 }
3978}
3979
3980
3981
3982
3983
3984
3985static void
3986enet_addr_rst(u_long aprom_addr)
3987{
3988 union {
3989 struct {
3990 u32 a;
3991 u32 b;
3992 } llsig;
3993 char Sig[sizeof(u32) << 1];
3994 } dev;
3995 short sigLength=0;
3996 s8 data;
3997 int i, j;
3998
3999 dev.llsig.a = ETH_PROM_SIG;
4000 dev.llsig.b = ETH_PROM_SIG;
4001 sigLength = sizeof(u32) << 1;
4002
4003 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4004 data = inb(aprom_addr);
4005 if (dev.Sig[j] == data) {
4006 j++;
4007 } else {
4008 if (data == dev.Sig[0]) {
4009 j=1;
4010 } else {
4011 j=0;
4012 }
4013 }
4014 }
4015}
4016
4017
4018
4019
4020
4021
4022
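/*
** Assemble dev->dev_addr from the address PROM (DC21040/EISA) or the
** SROM image, accumulating DIGITAL's checksum over the three address
** words; with 'dec_only' set a checksum mismatch fails the probe. The
** CONFIG_PPC_PMAC block bit-reverses each byte of addresses starting
** 00:a0:..., apparently to undo a firmware quirk on early PowerMacs.
*/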
4023static int
4024get_hw_addr(struct net_device *dev)
4025{
4026 u_long iobase = dev->base_addr;
4027 int broken, i, k, tmp, status = 0;
4028 u_short j,chksum;
4029 struct de4x5_private *lp = netdev_priv(dev);
4030
4031 broken = de4x5_bad_srom(lp);
4032
4033 for (i=0,k=0,j=0;j<3;j++) {
4034 k <<= 1;
4035 if (k > 0xffff) k-=0xffff;
4036
4037 if (lp->bus == PCI) {
4038 if (lp->chipset == DC21040) {
4039 while ((tmp = inl(DE4X5_APROM)) < 0);
4040 k += (u_char) tmp;
4041 dev->dev_addr[i++] = (u_char) tmp;
4042 while ((tmp = inl(DE4X5_APROM)) < 0);
4043 k += (u_short) (tmp << 8);
4044 dev->dev_addr[i++] = (u_char) tmp;
4045 } else if (!broken) {
4046 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4047 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4048 } else if ((broken == SMC) || (broken == ACCTON)) {
4049 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4050 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4051 }
4052 } else {
4053 k += (u_char) (tmp = inb(EISA_APROM));
4054 dev->dev_addr[i++] = (u_char) tmp;
4055 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4056 dev->dev_addr[i++] = (u_char) tmp;
4057 }
4058
4059 if (k > 0xffff) k-=0xffff;
4060 }
4061 if (k == 0xffff) k=0;
4062
4063 if (lp->bus == PCI) {
4064 if (lp->chipset == DC21040) {
4065 while ((tmp = inl(DE4X5_APROM)) < 0);
4066 chksum = (u_char) tmp;
4067 while ((tmp = inl(DE4X5_APROM)) < 0);
4068 chksum |= (u_short) (tmp << 8);
4069 if ((k != chksum) && (dec_only)) status = -1;
4070 }
4071 } else {
4072 chksum = (u_char) inb(EISA_APROM);
4073 chksum |= (u_short) (inb(EISA_APROM) << 8);
4074 if ((k != chksum) && (dec_only)) status = -1;
4075 }
4076
4077
4078 srom_repair(dev, broken);
4079
4080#ifdef CONFIG_PPC_PMAC
4081
4082
4083
4084
4085 if ( machine_is(powermac) &&
4086 (dev->dev_addr[0] == 0) &&
4087 (dev->dev_addr[1] == 0xa0) )
4088 {
4089 for (i = 0; i < ETH_ALEN; ++i)
4090 {
4091 int x = dev->dev_addr[i];
4092 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4093 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4094 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4095 }
4096 }
4097#endif
4098
4099
4100 status = test_bad_enet(dev, status);
4101
4102 return status;
4103}
4104
4105
4106
4107
4108
4109static int
4110de4x5_bad_srom(struct de4x5_private *lp)
4111{
4112 int i, status = 0;
4113
4114 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4115 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
4116 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
4117 if (i == 0) {
4118 status = SMC;
4119 } else if (i == 1) {
4120 status = ACCTON;
4121 }
4122 break;
4123 }
4124 }
4125
4126 return status;
4127}
4128
4129static int
4130de4x5_strncmp(char *a, char *b, int n)
4131{
4132 int ret=0;
4133
4134 for (;n && !ret; n--) {
4135 ret = *a++ - *b++;
4136 }
4137
4138 return ret;
4139}
4140
4141static void
4142srom_repair(struct net_device *dev, int card)
4143{
4144 struct de4x5_private *lp = netdev_priv(dev);
4145
4146 switch(card) {
4147 case SMC:
4148 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4149 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4150 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4151 lp->useSROM = true;
4152 break;
4153 }
4154}
4155
4156
4157
4158
4159
4160static int
4161test_bad_enet(struct net_device *dev, int status)
4162{
4163 struct de4x5_private *lp = netdev_priv(dev);
4164 int i, tmp;
4165
4166 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4167 if ((tmp == 0) || (tmp == 0x5fa)) {
4168 if ((lp->chipset == last.chipset) &&
4169 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4170 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4171 for (i=ETH_ALEN-1; i>2; --i) {
4172 dev->dev_addr[i] += 1;
4173 if (dev->dev_addr[i] != 0) break;
4174 }
4175 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4176 if (!an_exception(lp)) {
4177 dev->irq = last.irq;
4178 }
4179
4180 status = 0;
4181 }
4182 } else if (!status) {
4183 last.chipset = lp->chipset;
4184 last.bus = lp->bus_num;
4185 last.irq = dev->irq;
4186 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4187 }
4188
4189 return status;
4190}
4191
4192
4193
4194
4195static int
4196an_exception(struct de4x5_private *lp)
4197{
4198 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4199 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4200 return -1;
4201 }
4202
4203 return 0;
4204}
4205
4206
4207
4208
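/*
** Bit-banged read of one 16 bit word from the serial SROM: select the
** device, clock out the read command and the 6 bit word address, then
** clock in 16 data bits MSB first.
*/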
4209static short
4210srom_rd(u_long addr, u_char offset)
4211{
4212 sendto_srom(SROM_RD | SROM_SR, addr);
4213
4214 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4215 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4216 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4217
4218 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4219}
4220
4221static void
4222srom_latch(u_int command, u_long addr)
4223{
4224 sendto_srom(command, addr);
4225 sendto_srom(command | DT_CLK, addr);
4226 sendto_srom(command, addr);
4227}
4228
4229static void
4230srom_command(u_int command, u_long addr)
4231{
4232 srom_latch(command, addr);
4233 srom_latch(command, addr);
4234 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4235}
4236
4237static void
4238srom_address(u_int command, u_long addr, u_char offset)
4239{
4240 int i, a;
4241
4242 a = offset << 2;
4243 for (i=0; i<6; i++, a <<= 1) {
4244 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4245 }
4246 udelay(1);
4247
4248 i = (getfrom_srom(addr) >> 3) & 0x01;
4249}
4250
4251static short
4252srom_data(u_int command, u_long addr)
4253{
4254 int i;
4255 short word = 0;
4256 s32 tmp;
4257
4258 for (i=0; i<16; i++) {
4259 sendto_srom(command | DT_CLK, addr);
4260 tmp = getfrom_srom(addr);
4261 sendto_srom(command, addr);
4262
4263 word = (word << 1) | ((tmp >> 3) & 0x01);
4264 }
4265
4266 sendto_srom(command & 0x0000ff00, addr);
4267
4268 return word;
4269}
4270
4271
4284
4285static void
4286sendto_srom(u_int command, u_long addr)
4287{
4288 outl(command, addr);
4289 udelay(1);
4290}
4291
4292static int
4293getfrom_srom(u_long addr)
4294{
4295 s32 tmp;
4296
4297 tmp = inl(addr);
4298 udelay(1);
4299
4300 return tmp;
4301}
4302
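/*
** Find the SROM info leaf for this adapter: pick the decode function
** for the chipset from infoleaf_array[], then walk the per-device
** table in the SROM to locate the 16 bit info leaf offset.
*/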
4303static int
4304srom_infoleaf_info(struct net_device *dev)
4305{
4306 struct de4x5_private *lp = netdev_priv(dev);
4307 int i, count;
4308 u_char *p;
4309
4310
4311 for (i=0; i<INFOLEAF_SIZE; i++) {
4312 if (lp->chipset == infoleaf_array[i].chipset) break;
4313 }
4314 if (i == INFOLEAF_SIZE) {
4315 lp->useSROM = false;
4316 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4317 dev->name);
4318 return -ENXIO;
4319 }
4320
4321 lp->infoleaf_fn = infoleaf_array[i].fn;
4322
4323
4324 count = *((u_char *)&lp->srom + 19);
4325 p = (u_char *)&lp->srom + 26;
4326
4327 if (count > 1) {
4328 for (i=count; i; --i, p+=3) {
4329 if (lp->device == *p) break;
4330 }
4331 if (i == 0) {
4332 lp->useSROM = false;
4333 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4334 dev->name, lp->device);
4335 return -ENXIO;
4336 }
4337 }
4338
4339 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4340
4341 return 0;
4342}
4343
4344
4345
4346
4347
4348
4349
4350
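/*
** Walk the info blocks at the leaf offset once, setting up the GEP
** control word on a DC21140 and letting the type 1, 3 and 5 blocks
** record their MII and reset/GEP information before autosensing
** starts.
*/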
4351static void
4352srom_init(struct net_device *dev)
4353{
4354 struct de4x5_private *lp = netdev_priv(dev);
4355 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4356 u_char count;
4357
4358 p+=2;
4359 if (lp->chipset == DC21140) {
4360 lp->cache.gepc = (*p++ | GEP_CTRL);
4361 gep_wr(lp->cache.gepc, dev);
4362 }
4363
4364
4365 count = *p++;
4366
4367
4368 for (;count; --count) {
4369 if (*p < 128) {
4370 p += COMPACT_LEN;
4371 } else if (*(p+1) == 5) {
4372 type5_infoblock(dev, 1, p);
4373 p += ((*p & BLOCK_LEN) + 1);
4374 } else if (*(p+1) == 4) {
4375 p += ((*p & BLOCK_LEN) + 1);
4376 } else if (*(p+1) == 3) {
4377 type3_infoblock(dev, 1, p);
4378 p += ((*p & BLOCK_LEN) + 1);
4379 } else if (*(p+1) == 2) {
4380 p += ((*p & BLOCK_LEN) + 1);
4381 } else if (*(p+1) == 1) {
4382 type1_infoblock(dev, 1, p);
4383 p += ((*p & BLOCK_LEN) + 1);
4384 } else {
4385 p += ((*p & BLOCK_LEN) + 1);
4386 }
4387 }
4388}
4389
4390
4391
4392
4393
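/*
** Execute a GEP/SIA sequence from an SROM info block: 'count' values
** are written either to the GEP (DC21140, non type 5 blocks) or as
** 16 bit words into CSR15, with a 2 ms pause between writes; CSR13/14
** are restored afterwards on SIA based chips.
*/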
4394static void
4395srom_exec(struct net_device *dev, u_char *p)
4396{
4397 struct de4x5_private *lp = netdev_priv(dev);
4398 u_long iobase = dev->base_addr;
4399 u_char count = (p ? *p++ : 0);
4400 u_short *w = (u_short *)p;
4401
4402 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4403
4404 if (lp->chipset != DC21140) RESET_SIA;
4405
4406 while (count--) {
4407 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4408 *p++ : get_unaligned_le16(w++)), dev);
4409 mdelay(2);
4410 }
4411
4412 if (lp->chipset != DC21140) {
4413 outl(lp->cache.csr14, DE4X5_STRR);
4414 outl(lp->cache.csr13, DE4X5_SICR);
4415 }
4416}
4417
4418
4419
4420
4421
4422
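/*
** Per-chip info leaf decoders. Each skips the leaf header, hands the
** first info block to the dc_infoblock[] handler and, once every
** block has been tried (lp->tcount == count), declares the media NC
** and restarts the search from INIT. The DC21041 has no blocks to
** walk and just returns the autosense interval.
*/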
4423static int
4424dc21041_infoleaf(struct net_device *dev)
4425{
4426 return DE4X5_AUTOSENSE_MS;
4427}
4428
4429static int
4430dc21140_infoleaf(struct net_device *dev)
4431{
4432 struct de4x5_private *lp = netdev_priv(dev);
4433 u_char count = 0;
4434 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4435 int next_tick = DE4X5_AUTOSENSE_MS;
4436
4437
4438 p+=2;
4439
4440
4441 lp->cache.gepc = (*p++ | GEP_CTRL);
4442
4443
4444 count = *p++;
4445
4446
4447 if (*p < 128) {
4448 next_tick = dc_infoblock[COMPACT](dev, count, p);
4449 } else {
4450 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4451 }
4452
4453 if (lp->tcount == count) {
4454 lp->media = NC;
4455 if (lp->media != lp->c_media) {
4456 de4x5_dbg_media(dev);
4457 lp->c_media = lp->media;
4458 }
4459 lp->media = INIT;
4460 lp->tcount = 0;
4461 lp->tx_enable = false;
4462 }
4463
4464 return next_tick & ~TIMER_CB;
4465}
4466
4467static int
4468dc21142_infoleaf(struct net_device *dev)
4469{
4470 struct de4x5_private *lp = netdev_priv(dev);
4471 u_char count = 0;
4472 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4473 int next_tick = DE4X5_AUTOSENSE_MS;
4474
4475
4476 p+=2;
4477
4478
4479 count = *p++;
4480
4481
4482 if (*p < 128) {
4483 next_tick = dc_infoblock[COMPACT](dev, count, p);
4484 } else {
4485 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4486 }
4487
4488 if (lp->tcount == count) {
4489 lp->media = NC;
4490 if (lp->media != lp->c_media) {
4491 de4x5_dbg_media(dev);
4492 lp->c_media = lp->media;
4493 }
4494 lp->media = INIT;
4495 lp->tcount = 0;
4496 lp->tx_enable = false;
4497 }
4498
4499 return next_tick & ~TIMER_CB;
4500}
4501
4502static int
4503dc21143_infoleaf(struct net_device *dev)
4504{
4505 struct de4x5_private *lp = netdev_priv(dev);
4506 u_char count = 0;
4507 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4508 int next_tick = DE4X5_AUTOSENSE_MS;
4509
4510
4511 p+=2;
4512
4513
4514 count = *p++;
4515
4516
4517 if (*p < 128) {
4518 next_tick = dc_infoblock[COMPACT](dev, count, p);
4519 } else {
4520 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4521 }
4522 if (lp->tcount == count) {
4523 lp->media = NC;
4524 if (lp->media != lp->c_media) {
4525 de4x5_dbg_media(dev);
4526 lp->c_media = lp->media;
4527 }
4528 lp->media = INIT;
4529 lp->tcount = 0;
4530 lp->tx_enable = false;
4531 }
4532
4533 return next_tick & ~TIMER_CB;
4534}
4535
4536
4537
4538
4539
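/*
** SROM media info block handlers. Each one first skips ahead to the
** block selected by lp->tcount; when its own turn comes it loads the
** media code, GEP/CSR values and autosense bit definitions into the
** private area, switches the MAC port and drops into the appropriate
** autoconf state machine.
*/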
4540static int
4541compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4542{
4543 struct de4x5_private *lp = netdev_priv(dev);
4544 u_char flags, csr6;
4545
4546
4547 if (--count > lp->tcount) {
4548 if (*(p+COMPACT_LEN) < 128) {
4549 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4550 } else {
4551 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4552 }
4553 }
4554
4555 if ((lp->media == INIT) && (lp->timeout < 0)) {
4556 lp->ibn = COMPACT;
4557 lp->active = 0;
4558 gep_wr(lp->cache.gepc, dev);
4559 lp->infoblock_media = (*p++) & COMPACT_MC;
4560 lp->cache.gep = *p++;
4561 csr6 = *p++;
4562 flags = *p++;
4563
4564 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4565 lp->defMedium = (flags & 0x40) ? -1 : 0;
4566 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4567 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4568 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4569 lp->useMII = false;
4570
4571 de4x5_switch_mac_port(dev);
4572 }
4573
4574 return dc21140m_autoconf(dev);
4575}
4576
4577
4578
4579
4580static int
4581type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4582{
4583 struct de4x5_private *lp = netdev_priv(dev);
4584 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4585
4586
4587 if (--count > lp->tcount) {
4588 if (*(p+len) < 128) {
4589 return dc_infoblock[COMPACT](dev, count, p+len);
4590 } else {
4591 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4592 }
4593 }
4594
4595 if ((lp->media == INIT) && (lp->timeout < 0)) {
4596 lp->ibn = 0;
4597 lp->active = 0;
4598 gep_wr(lp->cache.gepc, dev);
4599 p+=2;
4600 lp->infoblock_media = (*p++) & BLOCK0_MC;
4601 lp->cache.gep = *p++;
4602 csr6 = *p++;
4603 flags = *p++;
4604
4605 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4606 lp->defMedium = (flags & 0x40) ? -1 : 0;
4607 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4608 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4609 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4610 lp->useMII = false;
4611
4612 de4x5_switch_mac_port(dev);
4613 }
4614
4615 return dc21140m_autoconf(dev);
4616}
4617
4618
4619
4620static int
4621type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4622{
4623 struct de4x5_private *lp = netdev_priv(dev);
4624 u_char len = (*p & BLOCK_LEN)+1;
4625
4626
4627 if (--count > lp->tcount) {
4628 if (*(p+len) < 128) {
4629 return dc_infoblock[COMPACT](dev, count, p+len);
4630 } else {
4631 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4632 }
4633 }
4634
4635 p += 2;
4636 if (lp->state == INITIALISED) {
4637 lp->ibn = 1;
4638 lp->active = *p++;
4639 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4640 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4641 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4642 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4643 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4644 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4645 return 0;
4646 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4647 lp->ibn = 1;
4648 lp->active = *p;
4649 lp->infoblock_csr6 = OMR_MII_100;
4650 lp->useMII = true;
4651 lp->infoblock_media = ANS;
4652
4653 de4x5_switch_mac_port(dev);
4654 }
4655
4656 return dc21140m_autoconf(dev);
4657}
4658
4659static int
4660type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4661{
4662 struct de4x5_private *lp = netdev_priv(dev);
4663 u_char len = (*p & BLOCK_LEN)+1;
4664
4665
4666 if (--count > lp->tcount) {
4667 if (*(p+len) < 128) {
4668 return dc_infoblock[COMPACT](dev, count, p+len);
4669 } else {
4670 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4671 }
4672 }
4673
4674 if ((lp->media == INIT) && (lp->timeout < 0)) {
4675 lp->ibn = 2;
4676 lp->active = 0;
4677 p += 2;
4678 lp->infoblock_media = (*p) & MEDIA_CODE;
4679
4680 if ((*p++) & EXT_FIELD) {
4681 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4682 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4683 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4684 } else {
4685 lp->cache.csr13 = CSR13;
4686 lp->cache.csr14 = CSR14;
4687 lp->cache.csr15 = CSR15;
4688 }
4689 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4690 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4691 lp->infoblock_csr6 = OMR_SIA;
4692 lp->useMII = false;
4693
4694 de4x5_switch_mac_port(dev);
4695 }
4696
4697 return dc2114x_autoconf(dev);
4698}
4699
4700static int
4701type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4702{
4703 struct de4x5_private *lp = netdev_priv(dev);
4704 u_char len = (*p & BLOCK_LEN)+1;
4705
4706
4707 if (--count > lp->tcount) {
4708 if (*(p+len) < 128) {
4709 return dc_infoblock[COMPACT](dev, count, p+len);
4710 } else {
4711 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4712 }
4713 }
4714
4715 p += 2;
4716 if (lp->state == INITIALISED) {
4717 lp->ibn = 3;
4718 lp->active = *p++;
4719 if (MOTO_SROM_BUG) lp->active = 0;
4720 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4721 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4722 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4723 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4724 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4725 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4726 lp->phy[lp->active].mci = *p;
4727 return 0;
4728 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4729 lp->ibn = 3;
4730 lp->active = *p;
4731 if (MOTO_SROM_BUG) lp->active = 0;
4732 lp->infoblock_csr6 = OMR_MII_100;
4733 lp->useMII = true;
4734 lp->infoblock_media = ANS;
4735
4736 de4x5_switch_mac_port(dev);
4737 }
4738
4739 return dc2114x_autoconf(dev);
4740}
4741
4742static int
4743type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4744{
4745 struct de4x5_private *lp = netdev_priv(dev);
4746 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4747
4748
4749 if (--count > lp->tcount) {
4750 if (*(p+len) < 128) {
4751 return dc_infoblock[COMPACT](dev, count, p+len);
4752 } else {
4753 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4754 }
4755 }
4756
4757 if ((lp->media == INIT) && (lp->timeout < 0)) {
4758 lp->ibn = 4;
4759 lp->active = 0;
4760 p+=2;
4761 lp->infoblock_media = (*p++) & MEDIA_CODE;
4762 lp->cache.csr13 = CSR13;
4763 lp->cache.csr14 = CSR14;
4764 lp->cache.csr15 = CSR15;
4765 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4766 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4767 csr6 = *p++;
4768 flags = *p++;
4769
4770 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4771 lp->defMedium = (flags & 0x40) ? -1 : 0;
4772 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4773 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4774 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4775 lp->useMII = false;
4776
4777 de4x5_switch_mac_port(dev);
4778 }
4779
4780 return dc2114x_autoconf(dev);
4781}
4782
4783
4784
4785
4786
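/*
** Type 5 blocks carry a hardware reset sequence: remember it in
** lp->rst and run it; no media selection is done here.
*/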
4787static int
4788type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4789{
4790 struct de4x5_private *lp = netdev_priv(dev);
4791 u_char len = (*p & BLOCK_LEN)+1;
4792
4793
4794 if (--count > lp->tcount) {
4795 if (*(p+len) < 128) {
4796 return dc_infoblock[COMPACT](dev, count, p+len);
4797 } else {
4798 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4799 }
4800 }
4801
4802
4803 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4804 p+=2;
4805 lp->rst = p;
4806 srom_exec(dev, lp->rst);
4807 }
4808
4809 return DE4X5_AUTOSENSE_MS;
4810}
4811
4812
4813
4814
4815
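/*
** MII management (MDIO) primitives: mii_rd()/mii_wr() bit-bang a
** management frame - preamble, start/opcode, PHY and register
** address, turnaround, 16 data bits - through the MII bits of the
** chip's SROM/MII register.
*/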
4816static int
4817mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4818{
4819 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4820 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4821 mii_wdata(MII_STRD, 4, ioaddr);
4822 mii_address(phyaddr, ioaddr);
4823 mii_address(phyreg, ioaddr);
4824 mii_ta(MII_STRD, ioaddr);
4825
4826 return mii_rdata(ioaddr);
4827}
4828
4829static void
4830mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4831{
4832 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4833 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4834 mii_wdata(MII_STWR, 4, ioaddr);
4835 mii_address(phyaddr, ioaddr);
4836 mii_address(phyreg, ioaddr);
4837 mii_ta(MII_STWR, ioaddr);
4838 data = mii_swap(data, 16);
4839 mii_wdata(data, 16, ioaddr);
4840}
4841
4842static int
4843mii_rdata(u_long ioaddr)
4844{
4845 int i;
4846 s32 tmp = 0;
4847
4848 for (i=0; i<16; i++) {
4849 tmp <<= 1;
4850 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4851 }
4852
4853 return tmp;
4854}
4855
4856static void
4857mii_wdata(int data, int len, u_long ioaddr)
4858{
4859 int i;
4860
4861 for (i=0; i<len; i++) {
4862 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4863 data >>= 1;
4864 }
4865}
4866
4867static void
4868mii_address(u_char addr, u_long ioaddr)
4869{
4870 int i;
4871
4872 addr = mii_swap(addr, 5);
4873 for (i=0; i<5; i++) {
4874 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4875 addr >>= 1;
4876 }
4877}
4878
4879static void
4880mii_ta(u_long rw, u_long ioaddr)
4881{
4882 if (rw == MII_STWR) {
4883 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4884 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4885 } else {
4886 getfrom_mii(MII_MRD | MII_RD, ioaddr);
4887 }
4888}
4889
4890static int
4891mii_swap(int data, int len)
4892{
4893 int i, tmp = 0;
4894
4895 for (i=0; i<len; i++) {
4896 tmp <<= 1;
4897 tmp |= (data & 1);
4898 data >>= 1;
4899 }
4900
4901 return tmp;
4902}
4903
4904static void
4905sendto_mii(u32 command, int data, u_long ioaddr)
4906{
4907 u32 j;
4908
4909 j = (data & 1) << 17;
4910 outl(command | j, ioaddr);
4911 udelay(1);
4912 outl(command | MII_MDC | j, ioaddr);
4913 udelay(1);
4914}
4915
4916static int
4917getfrom_mii(u32 command, u_long ioaddr)
4918{
4919 outl(command, ioaddr);
4920 udelay(1);
4921 outl(command | MII_MDC, ioaddr);
4922 udelay(1);
4923
4924 return (inl(ioaddr) >> 19) & 1;
4925}
4926
4927
4928
4929
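/*
** Return an identifier for the attached PHY. Both MII ID registers
** are read, but despite the name only the raw MII_ID0 value is
** returned and used for matching against phy_info[].
*/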
4930static int
4931mii_get_oui(u_char phyaddr, u_long ioaddr)
4932{
4939 int r2, r3;
4940
4941
4942 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4943 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
4944
4971 return r2;
4972}
4973
4974
4975
4976
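/*
** Probe all MII addresses for PHY devices, match their IDs against
** phy_info[], fall back to a generic entry for unknown parts, then
** reset every PHY found. Returns the number of PHYs (lp->mii_cnt).
*/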
4977static int
4978mii_get_phy(struct net_device *dev)
4979{
4980 struct de4x5_private *lp = netdev_priv(dev);
4981 u_long iobase = dev->base_addr;
4982 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4983 int id;
4984
4985 lp->active = 0;
4986 lp->useMII = true;
4987
4988
4989 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4990 lp->phy[lp->active].addr = i;
4991 if (i==0) n++;
4992 while (de4x5_reset_phy(dev)<0) udelay(100);
4993 id = mii_get_oui(i, DE4X5_MII);
4994 if ((id == 0) || (id == 65535)) continue;
4995 for (j=0; j<limit; j++) {
4996 if (id != phy_info[j].id) continue;
4997 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4998 if (k < DE4X5_MAX_PHY) {
4999 memcpy((char *)&lp->phy[k],
5000 (char *)&phy_info[j], sizeof(struct phy_table));
5001 lp->phy[k].addr = i;
5002 lp->mii_cnt++;
5003 lp->active++;
5004 } else {
5005 goto purgatory;
5006 }
5007 break;
5008 }
5009 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5010 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5011 lp->phy[k].addr = i;
5012 lp->phy[k].id = id;
5013 lp->phy[k].spd.reg = GENERIC_REG;
5014 lp->phy[k].spd.mask = GENERIC_MASK;
5015 lp->phy[k].spd.value = GENERIC_VALUE;
5016 lp->mii_cnt++;
5017 lp->active++;
5018 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5019 j = de4x5_debug;
5020 de4x5_debug |= DEBUG_MII;
5021 de4x5_dbg_mii(dev, k);
5022 de4x5_debug = j;
5023 printk("\n");
5024 }
5025 }
5026 purgatory:
5027 lp->active = 0;
5028 if (lp->phy[0].id) {
5029 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
5030 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5031 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5032
5033 de4x5_dbg_mii(dev, k);
5034 }
5035 }
5036 if (!lp->mii_cnt) lp->useMII = false;
5037
5038 return lp->mii_cnt;
5039}
5040
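/*
** Build the address filter setup frame: in hash filtering mode the
** interface address is placed in the imperfect/physical address slot
** and one fixed hash bit is set (presumably for broadcasts); in
** perfect filtering mode the interface and broadcast addresses are
** written as the first two table entries. Returns a pointer at which
** further addresses can be added.
*/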
5041static char *
5042build_setup_frame(struct net_device *dev, int mode)
5043{
5044 struct de4x5_private *lp = netdev_priv(dev);
5045 int i;
5046 char *pa = lp->setup_frame;
5047
5048
5049 if (mode == ALL) {
5050 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5051 }
5052
5053 if (lp->setup_f == HASH_PERF) {
5054 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5055 *(pa + i) = dev->dev_addr[i];
5056 if (i & 0x01) pa += 2;
5057 }
5058 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
5059 } else {
5060 for (i=0; i<ETH_ALEN; i++) {
5061 *(pa + (i&1)) = dev->dev_addr[i];
5062 if (i & 0x01) pa += 4;
5063 }
5064 for (i=0; i<ETH_ALEN; i++) {
5065 *(pa + (i&1)) = (char) 0xff;
5066 if (i & 0x01) pa += 4;
5067 }
5068 }
5069
5070 return pa;
5071}
5072
5073static void
5074disable_ast(struct net_device *dev)
5075{
5076 struct de4x5_private *lp = netdev_priv(dev);
5077 del_timer_sync(&lp->timer);
5078}
5079
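/*
** Switch the MAC port: stop the chip, set the port/duplex bits from
** the info block CSR6 value, soft reset, restore the GEP (DC21140) or
** SIA (DC2114x) set-up and rewrite OMR. The MFC read at the end
** presumably just clears the missed frame counter.
*/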
5080static long
5081de4x5_switch_mac_port(struct net_device *dev)
5082{
5083 struct de4x5_private *lp = netdev_priv(dev);
5084 u_long iobase = dev->base_addr;
5085 s32 omr;
5086
5087 STOP_DE4X5;
5088
5089
5090 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5091 OMR_FDX));
5092 omr |= lp->infoblock_csr6;
5093 if (omr & OMR_PS) omr |= OMR_HBD;
5094 outl(omr, DE4X5_OMR);
5095
5096
5097 RESET_DE4X5;
5098
5099
5100 if (lp->chipset == DC21140) {
5101 gep_wr(lp->cache.gepc, dev);
5102 gep_wr(lp->cache.gep, dev);
5103 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5104 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5105 }
5106
5107
5108 outl(omr, DE4X5_OMR);
5109
5110
5111 inl(DE4X5_MFC);
5112
5113 return omr;
5114}
5115
5116static void
5117gep_wr(s32 data, struct net_device *dev)
5118{
5119 struct de4x5_private *lp = netdev_priv(dev);
5120 u_long iobase = dev->base_addr;
5121
5122 if (lp->chipset == DC21140) {
5123 outl(data, DE4X5_GEP);
5124 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5125 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5126 }
5127}
5128
5129static int
5130gep_rd(struct net_device *dev)
5131{
5132 struct de4x5_private *lp = netdev_priv(dev);
5133 u_long iobase = dev->base_addr;
5134
5135 if (lp->chipset == DC21140) {
5136 return inl(DE4X5_GEP);
5137 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5138 return inl(DE4X5_SIGR) & 0x000fffff;
5139 }
5140
5141 return 0;
5142}
5143
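/*
** Move the chip between the WAKEUP, SNOOZE and SLEEP power states via
** the EISA configuration port or the PCI CFDA power management byte;
** the DC21040 and DC21140 have no such states and return immediately.
*/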
static void
yawn(struct net_device *dev, int state)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;

    if (lp->bus == EISA) {
        switch (state) {
        case WAKEUP:
            outb(WAKEUP, PCI_CFPM);
            mdelay(10);
            break;

        case SNOOZE:
            outb(SNOOZE, PCI_CFPM);
            break;

        case SLEEP:
            outl(0, DE4X5_SICR);
            outb(SLEEP, PCI_CFPM);
            break;
        }
    } else {
        struct pci_dev *pdev = to_pci_dev(lp->gendev);
        switch (state) {
        case WAKEUP:
            pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
            mdelay(10);
            break;

        case SNOOZE:
            pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
            break;

        case SLEEP:
            outl(0, DE4X5_SICR);
            pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
            break;
        }
    }
}

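/*
** Parse the 'args' module/command line parameter for per-device options.
** A minimal usage sketch (the exact spelling is only an assumption derived
** from the substring matching below -- the device name must appear, and the
** "fdx" and "autosense" tokens are matched anywhere after it):
**
**     insmod de4x5 args='eth0:fdx autosense=TP'
**
** which requests full duplex and TP media selection for eth0.
*/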
static void
de4x5_parse_params(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    char *p, *q, t;

    lp->params.fdx = false;
    lp->params.autosense = AUTO;

    if (args == NULL) return;

    if ((p = strstr(args, dev->name))) {
        if (!(q = strstr(p + strlen(dev->name), "eth"))) q = p + strlen(p);
        t = *q;
        *q = '\0';

        if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;

        if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
            /* Match the longer, more specific tokens first so that
             * "TP_NW" is not mistaken for "TP" and "BNC_AUI" is not
             * mistaken for "BNC" or "AUI".
             */
            if (strstr(p, "TP_NW")) {
                lp->params.autosense = TP_NW;
            } else if (strstr(p, "TP")) {
                lp->params.autosense = TP;
            } else if (strstr(p, "BNC_AUI")) {
                lp->params.autosense = BNC;
            } else if (strstr(p, "BNC")) {
                lp->params.autosense = BNC;
            } else if (strstr(p, "AUI")) {
                lp->params.autosense = AUI;
            } else if (strstr(p, "10Mb")) {
                lp->params.autosense = _10Mb;
            } else if (strstr(p, "100Mb")) {
                lp->params.autosense = _100Mb;
            } else if (strstr(p, "AUTO")) {
                lp->params.autosense = AUTO;
            }
        }
        *q = t;
    }
}

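/*
** Debug dump helpers. Each routine is gated by a bit in de4x5_debug
** (DEBUG_OPEN, DEBUG_MII, DEBUG_MEDIA, DEBUG_SROM, DEBUG_RX) and only
** writes state to the kernel log.
*/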
static void
de4x5_dbg_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;

    if (de4x5_debug & DEBUG_OPEN) {
        printk("%s: de4x5 opening with irq %d\n", dev->name, dev->irq);
        printk("\tphysical address: %pM\n", dev->dev_addr);
        printk("Descriptor head addresses:\n");
        printk("\t0x%8.8lx 0x%8.8lx\n", (u_long)lp->rx_ring, (u_long)lp->tx_ring);
        printk("Descriptor addresses:\nRX: ");
        for (i = 0; i < lp->rxRingSize - 1; i++) {
            if (i < 3) {
                printk("0x%8.8lx ", (u_long)&lp->rx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n", (u_long)&lp->rx_ring[i].status);
        printk("TX: ");
        for (i = 0; i < lp->txRingSize - 1; i++) {
            if (i < 3) {
                printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
        printk("Descriptor buffers:\nRX: ");
        for (i = 0; i < lp->rxRingSize - 1; i++) {
            if (i < 3) {
                printk("0x%8.8x ", le32_to_cpu(lp->rx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n", le32_to_cpu(lp->rx_ring[i].buf));
        printk("TX: ");
        for (i = 0; i < lp->txRingSize - 1; i++) {
            if (i < 3) {
                printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
        printk("Ring size:\nRX: %d\nTX: %d\n",
               (short)lp->rxRingSize,
               (short)lp->txRingSize);
    }
}

static void
de4x5_dbg_mii(struct net_device *dev, int k)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (de4x5_debug & DEBUG_MII) {
        printk("\nMII device address: %d\n", lp->phy[k].addr);
        printk("MII CR: %x\n", mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII));
        printk("MII SR: %x\n", mii_rd(MII_SR, lp->phy[k].addr, DE4X5_MII));
        printk("MII ID0: %x\n", mii_rd(MII_ID0, lp->phy[k].addr, DE4X5_MII));
        printk("MII ID1: %x\n", mii_rd(MII_ID1, lp->phy[k].addr, DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII ANA: %x\n", mii_rd(0x04, lp->phy[k].addr, DE4X5_MII));
            printk("MII ANC: %x\n", mii_rd(0x05, lp->phy[k].addr, DE4X5_MII));
        }
        printk("MII 16: %x\n", mii_rd(0x10, lp->phy[k].addr, DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII 17: %x\n", mii_rd(0x11, lp->phy[k].addr, DE4X5_MII));
            printk("MII 18: %x\n", mii_rd(0x12, lp->phy[k].addr, DE4X5_MII));
        } else {
            printk("MII 20: %x\n", mii_rd(0x14, lp->phy[k].addr, DE4X5_MII));
        }
    }
}

static void
de4x5_dbg_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->media != lp->c_media) {
        if (de4x5_debug & DEBUG_MEDIA) {
            printk("%s: media is %s%s\n", dev->name,
                   (lp->media == NC  ? "unconnected, link down or incompatible connection" :
                   (lp->media == TP  ? "TP" :
                   (lp->media == ANS ? "TP/Nway" :
                   (lp->media == BNC ? "BNC" :
                   (lp->media == AUI ? "AUI" :
                   (lp->media == BNC_AUI ? "BNC/AUI" :
                   (lp->media == EXT_SIA ? "EXT SIA" :
                   (lp->media == _100Mb  ? "100Mb/s" :
                   (lp->media == _10Mb   ? "10Mb/s" :
                   "???"
                   ))))))))), (lp->fdx ? " full duplex." : "."));
        }
        lp->c_media = lp->media;
    }
}

static void
de4x5_dbg_srom(struct de4x5_srom *p)
{
    int i;

    if (de4x5_debug & DEBUG_SROM) {
        printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
        printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
        printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
        printk("SROM version: %02x\n", (u_char)(p->version));
        printk("# controllers: %02x\n", (u_char)(p->num_controllers));

        printk("Hardware Address: %pM\n", p->ieee_addr);
        printk("CRC checksum: %04x\n", (u_short)(p->chksum));
        for (i = 0; i < 64; i++) {
            printk("%3d %04x\n", i << 1, (u_short)*((u_short *)p + i));
        }
    }
}

static void
de4x5_dbg_rx(struct sk_buff *skb, int len)
{
    int i, j;

    if (de4x5_debug & DEBUG_RX) {
        printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
               skb->data, &skb->data[6],
               (u_char)skb->data[12],
               (u_char)skb->data[13],
               len);
        for (j = 0; len > 0; j += 16, len -= 16) {
            printk(" %03x: ", j);
            for (i = 0; i < 16 && i < len; i++) {
                printk("%02x ", (u_char)skb->data[i + j]);
            }
            printk("\n");
        }
    }
}

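/*
** Private ioctl interface. The commands handled below arrive overlaid on
** ifr_ifru as a struct de4x5_ioctl; several of them require CAP_NET_ADMIN.
** A hypothetical userspace sketch -- it assumes the handler is reached via
** a SIOCDEVPRIVATE request and that struct de4x5_ioctl exposes the cmd,
** len and data members used below:
**
**     struct ifreq ifr;
**     struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
**     u_char hwaddr[ETH_ALEN];
**
**     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
**     ioc->cmd  = DE4X5_GET_HWADDR;
**     ioc->len  = ETH_ALEN;
**     ioc->data = hwaddr;
**     ioctl(sockfd, SIOCDEVPRIVATE, &ifr);    <- copies the MAC to hwaddr
*/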
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
        u8  addr[144];
        u16 sval[72];
        u32 lval[36];
    } tmp;
    u_long flags = 0;

    switch (ioc->cmd) {
    case DE4X5_GET_HWADDR:           /* Get the hardware address */
        ioc->len = ETH_ALEN;
        for (i = 0; i < ETH_ALEN; i++) {
            tmp.addr[i] = dev->dev_addr[i];
        }
        if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;

    case DE4X5_SET_HWADDR:           /* Set the hardware address */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
        if (netif_queue_stopped(dev))
            return -EBUSY;
        netif_stop_queue(dev);
        for (i = 0; i < ETH_ALEN; i++) {
            dev->dev_addr[i] = tmp.addr[i];
        }
        build_setup_frame(dev, PHYS_ADDR_ONLY);
        /* Set up the descriptor and give ownership to the device */
        load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
                    SETUP_FRAME_LEN, (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);            /* Start the TX */
        netif_wake_queue(dev);                   /* Unlock the TX ring */
        break;

    case DE4X5_SAY_BOO:              /* Say "Boo!" to the kernel log file */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        printk("%s: Boo!\n", dev->name);
        break;

    case DE4X5_MCA_EN:               /* Enable pass-all multicast addressing */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        omr = inl(DE4X5_OMR);
        omr |= OMR_PM;
        outl(omr, DE4X5_OMR);
        break;

    case DE4X5_GET_STATS:            /* Get the driver statistics */
    {
        struct pkt_stats statbuf;
        ioc->len = sizeof(statbuf);
        spin_lock_irqsave(&lp->lock, flags);
        memcpy(&statbuf, &lp->pktStats, ioc->len);
        spin_unlock_irqrestore(&lp->lock, flags);
        if (copy_to_user(ioc->data, &statbuf, ioc->len))
            return -EFAULT;
        break;
    }
    case DE4X5_CLR_STATS:            /* Zero out the driver statistics */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        spin_lock_irqsave(&lp->lock, flags);
        memset(&lp->pktStats, 0, sizeof(lp->pktStats));
        spin_unlock_irqrestore(&lp->lock, flags);
        break;

    case DE4X5_GET_OMR:              /* Get the OMR register contents */
        tmp.addr[0] = inl(DE4X5_OMR);
        if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
        break;

    case DE4X5_SET_OMR:              /* Set the OMR register contents */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
        outl(tmp.addr[0], DE4X5_OMR);
        break;

    case DE4X5_GET_REG:              /* Get the DE4X5 registers */
        j = 0;
        tmp.lval[0] = inl(DE4X5_STS);  j += 4;
        tmp.lval[1] = inl(DE4X5_BMR);  j += 4;
        tmp.lval[2] = inl(DE4X5_IMR);  j += 4;
        tmp.lval[3] = inl(DE4X5_OMR);  j += 4;
        tmp.lval[4] = inl(DE4X5_SISR); j += 4;
        tmp.lval[5] = inl(DE4X5_SICR); j += 4;
        tmp.lval[6] = inl(DE4X5_STRR); j += 4;
        tmp.lval[7] = inl(DE4X5_SIGR); j += 4;
        ioc->len = j;
        if (copy_to_user(ioc->data, tmp.lval, ioc->len))
            return -EFAULT;
        break;

#define DE4X5_DUMP 0x0f /* Status dump command; not handled below, so it
                           returns -EOPNOTSUPP via the default case */

    default:
        return -EOPNOTSUPP;
    }

    return status;
}

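/*
** Module entry/exit: register and unregister the PCI and/or EISA drivers,
** depending on the kernel configuration.
*/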
static int __init de4x5_module_init(void)
{
    int err = 0;

#ifdef CONFIG_PCI
    err = pci_register_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    err |= eisa_driver_register(&de4x5_eisa_driver);
#endif

    return err;
}

static void __exit de4x5_module_exit(void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    eisa_driver_unregister(&de4x5_eisa_driver);
#endif
}

module_init(de4x5_module_init);
module_exit(de4x5_module_exit);