1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446#include <linux/compat.h>
447#include <linux/module.h>
448#include <linux/kernel.h>
449#include <linux/string.h>
450#include <linux/interrupt.h>
451#include <linux/ptrace.h>
452#include <linux/errno.h>
453#include <linux/ioport.h>
454#include <linux/pci.h>
455#include <linux/eisa.h>
456#include <linux/delay.h>
457#include <linux/init.h>
458#include <linux/spinlock.h>
459#include <linux/crc32.h>
460#include <linux/netdevice.h>
461#include <linux/etherdevice.h>
462#include <linux/skbuff.h>
463#include <linux/time.h>
464#include <linux/types.h>
465#include <linux/unistd.h>
466#include <linux/ctype.h>
467#include <linux/dma-mapping.h>
468#include <linux/moduleparam.h>
469#include <linux/bitops.h>
470#include <linux/gfp.h>
471
472#include <asm/io.h>
473#include <asm/dma.h>
474#include <asm/byteorder.h>
475#include <asm/unaligned.h>
476#include <linux/uaccess.h>
477#ifdef CONFIG_PPC_PMAC
478#include <asm/machdep.h>
479#endif
480
481#include "de4x5.h"
482
483static const char version[] =
484 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
485
486#define c_char const char
487
488
489
490
491struct phy_table {
492 int reset;
493 int id;
494 int ta;
495 struct {
496 int reg;
497 int mask;
498 int value;
499 } spd;
500};
501
502struct mii_phy {
503 int reset;
504 int id;
505 int ta;
506 struct {
507 int reg;
508 int mask;
509 int value;
510 } spd;
511 int addr;
512 u_char *gep;
513 u_char *rst;
514 u_int mc;
515 u_int ana;
516 u_int fdx;
517 u_int ttm;
518 u_int mci;
519};
520
521#define DE4X5_MAX_PHY 8
522
523struct sia_phy {
524 u_char mc;
525 u_char ext;
526 int csr13;
527 int csr14;
528 int csr15;
529 int gepc;
530 int gep;
531};
532
533
534
535
536
537static struct phy_table phy_info[] = {
538 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
539 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
540 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
541 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}},
542 {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}}
543};
544
545
546
547
548
549
550#define GENERIC_REG 0x05
551#define GENERIC_MASK MII_ANLPA_100M
552#define GENERIC_VALUE MII_ANLPA_100M
553
554
555
556
557static c_char enet_det[][ETH_ALEN] = {
558 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
559 {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
560};
561
562#define SMC 1
563#define ACCTON 2
564
565
566
567
568
569
570static c_char srom_repair_info[][100] = {
571 {0x00,0x1e,0x00,0x00,0x00,0x08,
572 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
573 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
574 0x00,0x18,}
575};
576
577
578#ifdef DE4X5_DEBUG
579static int de4x5_debug = DE4X5_DEBUG;
580#else
581
582static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
583#endif
584
585
586
587
588
589
590
591
592
593
594#ifdef DE4X5_PARM
595static char *args = DE4X5_PARM;
596#else
597static char *args;
598#endif
599
600struct parameters {
601 bool fdx;
602 int autosense;
603};
604
605#define DE4X5_AUTOSENSE_MS 250
606
607#define DE4X5_NDA 0xffe0
608
609
610
611
612#define PROBE_LENGTH 32
613#define ETH_PROM_SIG 0xAA5500FFUL
614
615
616
617
618#define PKT_BUF_SZ 1536
619#define IEEE802_3_SZ 1518
620#define MAX_PKT_SZ 1514
621#define MAX_DAT_SZ 1500
622#define MIN_DAT_SZ 1
623#define PKT_HDR_LEN 14
624#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
625#define QUEUE_PKT_TIMEOUT (3*HZ)
626
627
628
629
630
631#define DE4X5_EISA_IO_PORTS 0x0c00
632#define DE4X5_EISA_TOTAL_SIZE 0x100
633
634#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
635
636#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
637#define DE4X5_NAME_LENGTH 8
638
639static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
640
641
642
643
644#define PROBE_LENGTH 32
645#define ETH_PROM_SIG 0xAA5500FFUL
646
647
648
649
650#define PCI_MAX_BUS_NUM 8
651#define DE4X5_PCI_TOTAL_SIZE 0x80
652#define DE4X5_CLASS_CODE 0x00020000
653
654
655
656
657
658
659
660#define DE4X5_ALIGN4 ((u_long)4 - 1)
661#define DE4X5_ALIGN8 ((u_long)8 - 1)
662#define DE4X5_ALIGN16 ((u_long)16 - 1)
663#define DE4X5_ALIGN32 ((u_long)32 - 1)
664#define DE4X5_ALIGN64 ((u_long)64 - 1)
665#define DE4X5_ALIGN128 ((u_long)128 - 1)
666
667#define DE4X5_ALIGN DE4X5_ALIGN32
668#define DE4X5_CACHE_ALIGN CAL_16LONG
669#define DESC_SKIP_LEN DSL_0
670
671#define DESC_ALIGN
672
673#ifndef DEC_ONLY
674static int dec_only;
675#else
676static int dec_only = 1;
677#endif
678
679
680
681
682#define ENABLE_IRQs { \
683 imr |= lp->irq_en;\
684 outl(imr, DE4X5_IMR); \
685}
686
687#define DISABLE_IRQs {\
688 imr = inl(DE4X5_IMR);\
689 imr &= ~lp->irq_en;\
690 outl(imr, DE4X5_IMR); \
691}
692
693#define UNMASK_IRQs {\
694 imr |= lp->irq_mask;\
695 outl(imr, DE4X5_IMR); \
696}
697
698#define MASK_IRQs {\
699 imr = inl(DE4X5_IMR);\
700 imr &= ~lp->irq_mask;\
701 outl(imr, DE4X5_IMR); \
702}
703
704
705
706
707#define START_DE4X5 {\
708 omr = inl(DE4X5_OMR);\
709 omr |= OMR_ST | OMR_SR;\
710 outl(omr, DE4X5_OMR); \
711}
712
713#define STOP_DE4X5 {\
714 omr = inl(DE4X5_OMR);\
715 omr &= ~(OMR_ST|OMR_SR);\
716 outl(omr, DE4X5_OMR); \
717}
718
719
720
721
722#define RESET_SIA outl(0, DE4X5_SICR);
723
724
725
726
727#define DE4X5_AUTOSENSE_MS 250
728
729
730
731
732struct de4x5_srom {
733 char sub_vendor_id[2];
734 char sub_system_id[2];
735 char reserved[12];
736 char id_block_crc;
737 char reserved2;
738 char version;
739 char num_controllers;
740 char ieee_addr[6];
741 char info[100];
742 short chksum;
743};
744#define SUB_VENDOR_ID 0x500a
745
746
747
748
749
750
751
752
753
754#define NUM_RX_DESC 8
755#define NUM_TX_DESC 32
756#define RX_BUFF_SZ 1536
757
758
759struct de4x5_desc {
760 volatile __le32 status;
761 __le32 des1;
762 __le32 buf;
763 __le32 next;
764 DESC_ALIGN
765};
766
767
768
769
770#define DE4X5_PKT_STAT_SZ 16
771#define DE4X5_PKT_BIN_SZ 128
772
773
774struct pkt_stats {
775 u_int bins[DE4X5_PKT_STAT_SZ];
776 u_int unicast;
777 u_int multicast;
778 u_int broadcast;
779 u_int excessive_collisions;
780 u_int tx_underruns;
781 u_int excessive_underruns;
782 u_int rx_runt_frames;
783 u_int rx_collision;
784 u_int rx_dribble;
785 u_int rx_overflow;
786};
787
788struct de4x5_private {
789 char adapter_name[80];
790 u_long interrupt;
791 struct de4x5_desc *rx_ring;
792 struct de4x5_desc *tx_ring;
793 struct sk_buff *tx_skb[NUM_TX_DESC];
794 struct sk_buff *rx_skb[NUM_RX_DESC];
795 int rx_new, rx_old;
796 int tx_new, tx_old;
797 char setup_frame[SETUP_FRAME_LEN];
798 char frame[64];
799 spinlock_t lock;
800 struct net_device_stats stats;
801 struct pkt_stats pktStats;
802 char rxRingSize;
803 char txRingSize;
804 int bus;
805 int bus_num;
806 int device;
807 int state;
808 int chipset;
809 s32 irq_mask;
810 s32 irq_en;
811 int media;
812 int c_media;
813 bool fdx;
814 int linkOK;
815 int autosense;
816 bool tx_enable;
817 int setup_f;
818 int local_state;
819 struct mii_phy phy[DE4X5_MAX_PHY];
820 struct sia_phy sia;
821 int active;
822 int mii_cnt;
823 int timeout;
824 struct timer_list timer;
825 int tmp;
826 struct {
827 u_long lock;
828 s32 csr0;
829 s32 csr6;
830 s32 csr7;
831 s32 gep;
832 s32 gepc;
833 s32 csr13;
834 s32 csr14;
835 s32 csr15;
836 int save_cnt;
837 struct sk_buff_head queue;
838 } cache;
839 struct de4x5_srom srom;
840 int cfrv;
841 int rx_ovf;
842 bool useSROM;
843 bool useMII;
844 int asBitValid;
845 int asPolarity;
846 int asBit;
847 int defMedium;
848 int tcount;
849 int infoblock_init;
850 int infoleaf_offset;
851 s32 infoblock_csr6;
852 int infoblock_media;
853 int (*infoleaf_fn)(struct net_device *);
854 u_char *rst;
855 u_char ibn;
856 struct parameters params;
857 struct device *gendev;
858 dma_addr_t dma_rings;
859 int dma_size;
860 char *rx_bufs;
861};
862
863
864
865
866
867
868
869
870
871
872
873
874
875static struct {
876 int chipset;
877 int bus;
878 int irq;
879 u_char addr[ETH_ALEN];
880} last = {0,};
881
882
883
884
885
886
887
888
889#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
890 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
891 lp->tx_old -lp->tx_new-1)
892
893#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
894
895
896
897
898static int de4x5_open(struct net_device *dev);
899static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
900 struct net_device *dev);
901static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
902static int de4x5_close(struct net_device *dev);
903static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
904static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
905static void set_multicast_list(struct net_device *dev);
906static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
907 void __user *data, int cmd);
908
909
910
911
912static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
913static int de4x5_init(struct net_device *dev);
914static int de4x5_sw_reset(struct net_device *dev);
915static int de4x5_rx(struct net_device *dev);
916static int de4x5_tx(struct net_device *dev);
917static void de4x5_ast(struct timer_list *t);
918static int de4x5_txur(struct net_device *dev);
919static int de4x5_rx_ovfc(struct net_device *dev);
920
921static int autoconf_media(struct net_device *dev);
922static void create_packet(struct net_device *dev, char *frame, int len);
923static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
924static int dc21040_autoconf(struct net_device *dev);
925static int dc21041_autoconf(struct net_device *dev);
926static int dc21140m_autoconf(struct net_device *dev);
927static int dc2114x_autoconf(struct net_device *dev);
928static int srom_autoconf(struct net_device *dev);
929static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
930static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
931static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
932static int test_for_100Mb(struct net_device *dev, int msec);
933static int wait_for_link(struct net_device *dev);
934static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
935static int is_spd_100(struct net_device *dev);
936static int is_100_up(struct net_device *dev);
937static int is_10_up(struct net_device *dev);
938static int is_anc_capable(struct net_device *dev);
939static int ping_media(struct net_device *dev, int msec);
940static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
941static void de4x5_free_rx_buffs(struct net_device *dev);
942static void de4x5_free_tx_buffs(struct net_device *dev);
943static void de4x5_save_skbs(struct net_device *dev);
944static void de4x5_rst_desc_ring(struct net_device *dev);
945static void de4x5_cache_state(struct net_device *dev, int flag);
946static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
947static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
948static struct sk_buff *de4x5_get_cache(struct net_device *dev);
949static void de4x5_setup_intr(struct net_device *dev);
950static void de4x5_init_connection(struct net_device *dev);
951static int de4x5_reset_phy(struct net_device *dev);
952static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
953static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
954static int test_tp(struct net_device *dev, s32 msec);
955static int EISA_signature(char *name, struct device *device);
956static void PCI_signature(char *name, struct de4x5_private *lp);
957static void DevicePresent(struct net_device *dev, u_long iobase);
958static void enet_addr_rst(u_long aprom_addr);
959static int de4x5_bad_srom(struct de4x5_private *lp);
960static short srom_rd(u_long address, u_char offset);
961static void srom_latch(u_int command, u_long address);
962static void srom_command(u_int command, u_long address);
963static void srom_address(u_int command, u_long address, u_char offset);
964static short srom_data(u_int command, u_long address);
965
966static void sendto_srom(u_int command, u_long addr);
967static int getfrom_srom(u_long addr);
968static int srom_map_media(struct net_device *dev);
969static int srom_infoleaf_info(struct net_device *dev);
970static void srom_init(struct net_device *dev);
971static void srom_exec(struct net_device *dev, u_char *p);
972static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
973static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
974static int mii_rdata(u_long ioaddr);
975static void mii_wdata(int data, int len, u_long ioaddr);
976static void mii_ta(u_long rw, u_long ioaddr);
977static int mii_swap(int data, int len);
978static void mii_address(u_char addr, u_long ioaddr);
979static void sendto_mii(u32 command, int data, u_long ioaddr);
980static int getfrom_mii(u32 command, u_long ioaddr);
981static int mii_get_oui(u_char phyaddr, u_long ioaddr);
982static int mii_get_phy(struct net_device *dev);
983static void SetMulticastFilter(struct net_device *dev);
984static int get_hw_addr(struct net_device *dev);
985static void srom_repair(struct net_device *dev, int card);
986static int test_bad_enet(struct net_device *dev, int status);
987static int an_exception(struct de4x5_private *lp);
988static char *build_setup_frame(struct net_device *dev, int mode);
989static void disable_ast(struct net_device *dev);
990static long de4x5_switch_mac_port(struct net_device *dev);
991static int gep_rd(struct net_device *dev);
992static void gep_wr(s32 data, struct net_device *dev);
993static void yawn(struct net_device *dev, int state);
994static void de4x5_parse_params(struct net_device *dev);
995static void de4x5_dbg_open(struct net_device *dev);
996static void de4x5_dbg_mii(struct net_device *dev, int k);
997static void de4x5_dbg_media(struct net_device *dev);
998static void de4x5_dbg_srom(struct de4x5_srom *p);
999static void de4x5_dbg_rx(struct sk_buff *skb, int len);
1000static int dc21041_infoleaf(struct net_device *dev);
1001static int dc21140_infoleaf(struct net_device *dev);
1002static int dc21142_infoleaf(struct net_device *dev);
1003static int dc21143_infoleaf(struct net_device *dev);
1004static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1005static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1006static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1007static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1008static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1009static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1010static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1011
1012
1013
1014
1015
1016
1017
1018static int io=0x0;
1019
1020module_param_hw(io, int, ioport, 0);
1021module_param(de4x5_debug, int, 0);
1022module_param(dec_only, int, 0);
1023module_param(args, charp, 0);
1024
1025MODULE_PARM_DESC(io, "de4x5 I/O base address");
1026MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1027MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1028MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1029MODULE_LICENSE("GPL");
1030
1031
1032
1033
1034struct InfoLeaf {
1035 int chipset;
1036 int (*fn)(struct net_device *);
1037};
1038static struct InfoLeaf infoleaf_array[] = {
1039 {DC21041, dc21041_infoleaf},
1040 {DC21140, dc21140_infoleaf},
1041 {DC21142, dc21142_infoleaf},
1042 {DC21143, dc21143_infoleaf}
1043};
1044#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1045
1046
1047
1048
1049static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1050 type0_infoblock,
1051 type1_infoblock,
1052 type2_infoblock,
1053 type3_infoblock,
1054 type4_infoblock,
1055 type5_infoblock,
1056 compact_infoblock
1057};
1058
1059#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1060
1061
1062
1063
1064#define RESET_DE4X5 {\
1065 int i;\
1066 i=inl(DE4X5_BMR);\
1067 mdelay(1);\
1068 outl(i | BMR_SWR, DE4X5_BMR);\
1069 mdelay(1);\
1070 outl(i, DE4X5_BMR);\
1071 mdelay(1);\
1072 for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
1073 mdelay(1);\
1074}
1075
1076#define PHY_HARD_RESET {\
1077 outl(GEP_HRST, DE4X5_GEP); \
1078 mdelay(1); \
1079 outl(0x00, DE4X5_GEP);\
1080 mdelay(2); \
1081}
1082
1083static const struct net_device_ops de4x5_netdev_ops = {
1084 .ndo_open = de4x5_open,
1085 .ndo_stop = de4x5_close,
1086 .ndo_start_xmit = de4x5_queue_pkt,
1087 .ndo_get_stats = de4x5_get_stats,
1088 .ndo_set_rx_mode = set_multicast_list,
1089 .ndo_siocdevprivate = de4x5_siocdevprivate,
1090 .ndo_set_mac_address= eth_mac_addr,
1091 .ndo_validate_addr = eth_validate_addr,
1092};
1093
1094
1095static int
1096de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1097{
1098 char name[DE4X5_NAME_LENGTH + 1];
1099 struct de4x5_private *lp = netdev_priv(dev);
1100 struct pci_dev *pdev = NULL;
1101 int i, status=0;
1102
1103 dev_set_drvdata(gendev, dev);
1104
1105
1106 if (lp->bus == EISA) {
1107 outb(WAKEUP, PCI_CFPM);
1108 } else {
1109 pdev = to_pci_dev (gendev);
1110 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1111 }
1112 mdelay(10);
1113
1114 RESET_DE4X5;
1115
1116 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1117 return -ENXIO;
1118 }
1119
1120
1121
1122
1123 lp->useSROM = false;
1124 if (lp->bus == PCI) {
1125 PCI_signature(name, lp);
1126 } else {
1127 EISA_signature(name, gendev);
1128 }
1129
1130 if (*name == '\0') {
1131 return -ENXIO;
1132 }
1133
1134 dev->base_addr = iobase;
1135 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1136
1137 status = get_hw_addr(dev);
1138 printk(", h/w address %pM\n", dev->dev_addr);
1139
1140 if (status != 0) {
1141 printk(" which has an Ethernet PROM CRC error.\n");
1142 return -ENXIO;
1143 } else {
1144 skb_queue_head_init(&lp->cache.queue);
1145 lp->cache.gepc = GEP_INIT;
1146 lp->asBit = GEP_SLNK;
1147 lp->asPolarity = GEP_SLNK;
1148 lp->asBitValid = ~0;
1149 lp->timeout = -1;
1150 lp->gendev = gendev;
1151 spin_lock_init(&lp->lock);
1152 timer_setup(&lp->timer, de4x5_ast, 0);
1153 de4x5_parse_params(dev);
1154
1155
1156
1157
1158 lp->autosense = lp->params.autosense;
1159 if (lp->chipset != DC21140) {
1160 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1161 lp->params.autosense = TP;
1162 }
1163 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1164 lp->params.autosense = BNC;
1165 }
1166 }
1167 lp->fdx = lp->params.fdx;
1168 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1169
1170 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1171#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1172 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1173#endif
1174 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1175 &lp->dma_rings, GFP_ATOMIC);
1176 if (lp->rx_ring == NULL) {
1177 return -ENOMEM;
1178 }
1179
1180 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1181
1182
1183
1184
1185
1186#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1187 for (i=0; i<NUM_RX_DESC; i++) {
1188 lp->rx_ring[i].status = 0;
1189 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1190 lp->rx_ring[i].buf = 0;
1191 lp->rx_ring[i].next = 0;
1192 lp->rx_skb[i] = (struct sk_buff *) 1;
1193 }
1194
1195#else
1196 {
1197 dma_addr_t dma_rx_bufs;
1198
1199 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1200 * sizeof(struct de4x5_desc);
1201 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1202 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1203 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1204 for (i=0; i<NUM_RX_DESC; i++) {
1205 lp->rx_ring[i].status = 0;
1206 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1207 lp->rx_ring[i].buf =
1208 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1209 lp->rx_ring[i].next = 0;
1210 lp->rx_skb[i] = (struct sk_buff *) 1;
1211 }
1212
1213 }
1214#endif
1215
1216 barrier();
1217
1218 lp->rxRingSize = NUM_RX_DESC;
1219 lp->txRingSize = NUM_TX_DESC;
1220
1221
1222 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1223 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1224
1225
1226 outl(lp->dma_rings, DE4X5_RRBA);
1227 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1228 DE4X5_TRBA);
1229
1230
1231 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1232 lp->irq_en = IMR_NIM | IMR_AIM;
1233
1234
1235 create_packet(dev, lp->frame, sizeof(lp->frame));
1236
1237
1238 i = lp->cfrv & 0x000000fe;
1239 if ((lp->chipset == DC21140) && (i == 0x20)) {
1240 lp->rx_ovf = 1;
1241 }
1242
1243
1244 if (lp->useSROM) {
1245 lp->state = INITIALISED;
1246 if (srom_infoleaf_info(dev)) {
1247 dma_free_coherent (gendev, lp->dma_size,
1248 lp->rx_ring, lp->dma_rings);
1249 return -ENXIO;
1250 }
1251 srom_init(dev);
1252 }
1253
1254 lp->state = CLOSED;
1255
1256
1257
1258
1259 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1260 mii_get_phy(dev);
1261 }
1262
1263 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1264 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1265 }
1266
1267 if (de4x5_debug & DEBUG_VERSION) {
1268 printk(version);
1269 }
1270
1271
1272 SET_NETDEV_DEV(dev, gendev);
1273 dev->netdev_ops = &de4x5_netdev_ops;
1274 dev->mem_start = 0;
1275
1276
1277 if ((status = register_netdev (dev))) {
1278 dma_free_coherent (gendev, lp->dma_size,
1279 lp->rx_ring, lp->dma_rings);
1280 return status;
1281 }
1282
1283
1284 yawn(dev, SLEEP);
1285
1286 return status;
1287}
1288
1289
1290static int
1291de4x5_open(struct net_device *dev)
1292{
1293 struct de4x5_private *lp = netdev_priv(dev);
1294 u_long iobase = dev->base_addr;
1295 int i, status = 0;
1296 s32 omr;
1297
1298
1299 for (i=0; i<lp->rxRingSize; i++) {
1300 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1301 de4x5_free_rx_buffs(dev);
1302 return -EAGAIN;
1303 }
1304 }
1305
1306
1307
1308
1309 yawn(dev, WAKEUP);
1310
1311
1312
1313
1314 status = de4x5_init(dev);
1315 spin_lock_init(&lp->lock);
1316 lp->state = OPEN;
1317 de4x5_dbg_open(dev);
1318
1319 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1320 lp->adapter_name, dev)) {
1321 printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
1322 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1323 lp->adapter_name, dev)) {
1324 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1325 disable_ast(dev);
1326 de4x5_free_rx_buffs(dev);
1327 de4x5_free_tx_buffs(dev);
1328 yawn(dev, SLEEP);
1329 lp->state = CLOSED;
1330 return -EAGAIN;
1331 } else {
1332 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1333 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1334 }
1335 }
1336
1337 lp->interrupt = UNMASK_INTERRUPTS;
1338 netif_trans_update(dev);
1339
1340 START_DE4X5;
1341
1342 de4x5_setup_intr(dev);
1343
1344 if (de4x5_debug & DEBUG_OPEN) {
1345 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1346 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1347 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1348 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1349 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1350 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1351 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1352 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1353 }
1354
1355 return status;
1356}
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366static int
1367de4x5_init(struct net_device *dev)
1368{
1369
1370 netif_stop_queue(dev);
1371
1372 de4x5_sw_reset(dev);
1373
1374
1375 autoconf_media(dev);
1376
1377 return 0;
1378}
1379
1380static int
1381de4x5_sw_reset(struct net_device *dev)
1382{
1383 struct de4x5_private *lp = netdev_priv(dev);
1384 u_long iobase = dev->base_addr;
1385 int i, j, status = 0;
1386 s32 bmr, omr;
1387
1388
1389 if (!lp->useSROM) {
1390 if (lp->phy[lp->active].id != 0) {
1391 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1392 } else {
1393 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1394 }
1395 de4x5_switch_mac_port(dev);
1396 }
1397
1398
1399
1400
1401
1402
1403 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1404 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1405 outl(bmr, DE4X5_BMR);
1406
1407 omr = inl(DE4X5_OMR) & ~OMR_PR;
1408 if (lp->chipset == DC21140) {
1409 omr |= (OMR_SDP | OMR_SB);
1410 }
1411 lp->setup_f = PERFECT;
1412 outl(lp->dma_rings, DE4X5_RRBA);
1413 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1414 DE4X5_TRBA);
1415
1416 lp->rx_new = lp->rx_old = 0;
1417 lp->tx_new = lp->tx_old = 0;
1418
1419 for (i = 0; i < lp->rxRingSize; i++) {
1420 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1421 }
1422
1423 for (i = 0; i < lp->txRingSize; i++) {
1424 lp->tx_ring[i].status = cpu_to_le32(0);
1425 }
1426
1427 barrier();
1428
1429
1430 SetMulticastFilter(dev);
1431
1432 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1433 outl(omr|OMR_ST, DE4X5_OMR);
1434
1435
1436
1437 for (j=0, i=0;(i<500) && (j==0);i++) {
1438 mdelay(1);
1439 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1440 }
1441 outl(omr, DE4X5_OMR);
1442
1443 if (j == 0) {
1444 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1445 inl(DE4X5_STS));
1446 status = -EIO;
1447 }
1448
1449 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1450 lp->tx_old = lp->tx_new;
1451
1452 return status;
1453}
1454
1455
1456
1457
1458static netdev_tx_t
1459de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1460{
1461 struct de4x5_private *lp = netdev_priv(dev);
1462 u_long iobase = dev->base_addr;
1463 u_long flags = 0;
1464
1465 netif_stop_queue(dev);
1466 if (!lp->tx_enable)
1467 goto tx_err;
1468
1469
1470
1471
1472
1473
1474 spin_lock_irqsave(&lp->lock, flags);
1475 de4x5_tx(dev);
1476 spin_unlock_irqrestore(&lp->lock, flags);
1477
1478
1479 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1480 goto tx_err;
1481
1482
1483 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1484 if (lp->interrupt) {
1485 de4x5_putb_cache(dev, skb);
1486 } else {
1487 de4x5_put_cache(dev, skb);
1488 }
1489 if (de4x5_debug & DEBUG_TX) {
1490 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1491 }
1492 } else if (skb->len > 0) {
1493
1494 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1495 de4x5_put_cache(dev, skb);
1496 skb = de4x5_get_cache(dev);
1497 }
1498
1499 while (skb && !netif_queue_stopped(dev) &&
1500 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1501 spin_lock_irqsave(&lp->lock, flags);
1502 netif_stop_queue(dev);
1503 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1504 lp->stats.tx_bytes += skb->len;
1505 outl(POLL_DEMAND, DE4X5_TPD);
1506
1507 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1508
1509 if (TX_BUFFS_AVAIL) {
1510 netif_start_queue(dev);
1511 }
1512 skb = de4x5_get_cache(dev);
1513 spin_unlock_irqrestore(&lp->lock, flags);
1514 }
1515 if (skb) de4x5_putb_cache(dev, skb);
1516 }
1517
1518 lp->cache.lock = 0;
1519
1520 return NETDEV_TX_OK;
1521tx_err:
1522 dev_kfree_skb_any(skb);
1523 return NETDEV_TX_OK;
1524}
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537static irqreturn_t
1538de4x5_interrupt(int irq, void *dev_id)
1539{
1540 struct net_device *dev = dev_id;
1541 struct de4x5_private *lp;
1542 s32 imr, omr, sts, limit;
1543 u_long iobase;
1544 unsigned int handled = 0;
1545
1546 lp = netdev_priv(dev);
1547 spin_lock(&lp->lock);
1548 iobase = dev->base_addr;
1549
1550 DISABLE_IRQs;
1551
1552 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1553 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1554
1555 synchronize_irq(dev->irq);
1556
1557 for (limit=0; limit<8; limit++) {
1558 sts = inl(DE4X5_STS);
1559 outl(sts, DE4X5_STS);
1560
1561 if (!(sts & lp->irq_mask)) break;
1562 handled = 1;
1563
1564 if (sts & (STS_RI | STS_RU))
1565 de4x5_rx(dev);
1566
1567 if (sts & (STS_TI | STS_TU))
1568 de4x5_tx(dev);
1569
1570 if (sts & STS_LNF) {
1571 lp->irq_mask &= ~IMR_LFM;
1572 }
1573
1574 if (sts & STS_UNF) {
1575 de4x5_txur(dev);
1576 }
1577
1578 if (sts & STS_SE) {
1579 STOP_DE4X5;
1580 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1581 dev->name, sts);
1582 spin_unlock(&lp->lock);
1583 return IRQ_HANDLED;
1584 }
1585 }
1586
1587
1588 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1589 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1590 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1591 }
1592 lp->cache.lock = 0;
1593 }
1594
1595 lp->interrupt = UNMASK_INTERRUPTS;
1596 ENABLE_IRQs;
1597 spin_unlock(&lp->lock);
1598
1599 return IRQ_RETVAL(handled);
1600}
1601
1602static int
1603de4x5_rx(struct net_device *dev)
1604{
1605 struct de4x5_private *lp = netdev_priv(dev);
1606 u_long iobase = dev->base_addr;
1607 int entry;
1608 s32 status;
1609
1610 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1611 entry=lp->rx_new) {
1612 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1613
1614 if (lp->rx_ovf) {
1615 if (inl(DE4X5_MFC) & MFC_FOCM) {
1616 de4x5_rx_ovfc(dev);
1617 break;
1618 }
1619 }
1620
1621 if (status & RD_FS) {
1622 lp->rx_old = entry;
1623 }
1624
1625 if (status & RD_LS) {
1626 if (lp->tx_enable) lp->linkOK++;
1627 if (status & RD_ES) {
1628 lp->stats.rx_errors++;
1629 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1630 if (status & RD_CE) lp->stats.rx_crc_errors++;
1631 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1632 if (status & RD_TL) lp->stats.rx_length_errors++;
1633 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1634 if (status & RD_CS) lp->pktStats.rx_collision++;
1635 if (status & RD_DB) lp->pktStats.rx_dribble++;
1636 if (status & RD_OF) lp->pktStats.rx_overflow++;
1637 } else {
1638 struct sk_buff *skb;
1639 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1640 >> 16) - 4;
1641
1642 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1643 printk("%s: Insufficient memory; nuking packet.\n",
1644 dev->name);
1645 lp->stats.rx_dropped++;
1646 } else {
1647 de4x5_dbg_rx(skb, pkt_len);
1648
1649
1650 skb->protocol=eth_type_trans(skb,dev);
1651 de4x5_local_stats(dev, skb->data, pkt_len);
1652 netif_rx(skb);
1653
1654
1655 lp->stats.rx_packets++;
1656 lp->stats.rx_bytes += pkt_len;
1657 }
1658 }
1659
1660
1661 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1662 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1663 barrier();
1664 }
1665 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1666 barrier();
1667 }
1668
1669
1670
1671
1672 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1673 }
1674
1675 return 0;
1676}
1677
1678static inline void
1679de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1680{
1681 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1682 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1683 DMA_TO_DEVICE);
1684 if ((u_long) lp->tx_skb[entry] > 1)
1685 dev_kfree_skb_irq(lp->tx_skb[entry]);
1686 lp->tx_skb[entry] = NULL;
1687}
1688
1689
1690
1691
1692static int
1693de4x5_tx(struct net_device *dev)
1694{
1695 struct de4x5_private *lp = netdev_priv(dev);
1696 u_long iobase = dev->base_addr;
1697 int entry;
1698 s32 status;
1699
1700 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1701 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1702 if (status < 0) {
1703 break;
1704 } else if (status != 0x7fffffff) {
1705 if (status & TD_ES) {
1706 lp->stats.tx_errors++;
1707 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1708 if (status & TD_LC) lp->stats.tx_window_errors++;
1709 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1710 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1711 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1712
1713 if (TX_PKT_PENDING) {
1714 outl(POLL_DEMAND, DE4X5_TPD);
1715 }
1716 } else {
1717 lp->stats.tx_packets++;
1718 if (lp->tx_enable) lp->linkOK++;
1719 }
1720
1721 lp->stats.collisions += ((status & TD_EC) ? 16 :
1722 ((status & TD_CC) >> 3));
1723
1724
1725 if (lp->tx_skb[entry] != NULL)
1726 de4x5_free_tx_buff(lp, entry);
1727 }
1728
1729
1730 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1731 }
1732
1733
1734 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1735 if (lp->interrupt)
1736 netif_wake_queue(dev);
1737 else
1738 netif_start_queue(dev);
1739 }
1740
1741 return 0;
1742}
1743
1744static void
1745de4x5_ast(struct timer_list *t)
1746{
1747 struct de4x5_private *lp = from_timer(lp, t, timer);
1748 struct net_device *dev = dev_get_drvdata(lp->gendev);
1749 int next_tick = DE4X5_AUTOSENSE_MS;
1750 int dt;
1751
1752 if (lp->useSROM)
1753 next_tick = srom_autoconf(dev);
1754 else if (lp->chipset == DC21140)
1755 next_tick = dc21140m_autoconf(dev);
1756 else if (lp->chipset == DC21041)
1757 next_tick = dc21041_autoconf(dev);
1758 else if (lp->chipset == DC21040)
1759 next_tick = dc21040_autoconf(dev);
1760 lp->linkOK = 0;
1761
1762 dt = (next_tick * HZ) / 1000;
1763
1764 if (!dt)
1765 dt = 1;
1766
1767 mod_timer(&lp->timer, jiffies + dt);
1768}
1769
1770static int
1771de4x5_txur(struct net_device *dev)
1772{
1773 struct de4x5_private *lp = netdev_priv(dev);
1774 u_long iobase = dev->base_addr;
1775 int omr;
1776
1777 omr = inl(DE4X5_OMR);
1778 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1779 omr &= ~(OMR_ST|OMR_SR);
1780 outl(omr, DE4X5_OMR);
1781 while (inl(DE4X5_STS) & STS_TS);
1782 if ((omr & OMR_TR) < OMR_TR) {
1783 omr += 0x4000;
1784 } else {
1785 omr |= OMR_SF;
1786 }
1787 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1788 }
1789
1790 return 0;
1791}
1792
1793static int
1794de4x5_rx_ovfc(struct net_device *dev)
1795{
1796 struct de4x5_private *lp = netdev_priv(dev);
1797 u_long iobase = dev->base_addr;
1798 int omr;
1799
1800 omr = inl(DE4X5_OMR);
1801 outl(omr & ~OMR_SR, DE4X5_OMR);
1802 while (inl(DE4X5_STS) & STS_RS);
1803
1804 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1805 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1806 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1807 }
1808
1809 outl(omr, DE4X5_OMR);
1810
1811 return 0;
1812}
1813
1814static int
1815de4x5_close(struct net_device *dev)
1816{
1817 struct de4x5_private *lp = netdev_priv(dev);
1818 u_long iobase = dev->base_addr;
1819 s32 imr, omr;
1820
1821 disable_ast(dev);
1822
1823 netif_stop_queue(dev);
1824
1825 if (de4x5_debug & DEBUG_CLOSE) {
1826 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1827 dev->name, inl(DE4X5_STS));
1828 }
1829
1830
1831
1832
1833 DISABLE_IRQs;
1834 STOP_DE4X5;
1835
1836
1837 free_irq(dev->irq, dev);
1838 lp->state = CLOSED;
1839
1840
1841 de4x5_free_rx_buffs(dev);
1842 de4x5_free_tx_buffs(dev);
1843
1844
1845 yawn(dev, SLEEP);
1846
1847 return 0;
1848}
1849
1850static struct net_device_stats *
1851de4x5_get_stats(struct net_device *dev)
1852{
1853 struct de4x5_private *lp = netdev_priv(dev);
1854 u_long iobase = dev->base_addr;
1855
1856 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1857
1858 return &lp->stats;
1859}
1860
1861static void
1862de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1863{
1864 struct de4x5_private *lp = netdev_priv(dev);
1865 int i;
1866
1867 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1868 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1869 lp->pktStats.bins[i]++;
1870 i = DE4X5_PKT_STAT_SZ;
1871 }
1872 }
1873 if (is_multicast_ether_addr(buf)) {
1874 if (is_broadcast_ether_addr(buf)) {
1875 lp->pktStats.broadcast++;
1876 } else {
1877 lp->pktStats.multicast++;
1878 }
1879 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1880 lp->pktStats.unicast++;
1881 }
1882
1883 lp->pktStats.bins[0]++;
1884 if (lp->pktStats.bins[0] == 0) {
1885 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1886 }
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897static void
1898load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1899{
1900 struct de4x5_private *lp = netdev_priv(dev);
1901 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1902 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1903
1904 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1905 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1906 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1907 lp->tx_skb[lp->tx_new] = skb;
1908 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1909 barrier();
1910
1911 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1912 barrier();
1913}
1914
1915
1916
1917
1918static void
1919set_multicast_list(struct net_device *dev)
1920{
1921 struct de4x5_private *lp = netdev_priv(dev);
1922 u_long iobase = dev->base_addr;
1923
1924
1925 if (lp->state == OPEN) {
1926 if (dev->flags & IFF_PROMISC) {
1927 u32 omr;
1928 omr = inl(DE4X5_OMR);
1929 omr |= OMR_PR;
1930 outl(omr, DE4X5_OMR);
1931 } else {
1932 SetMulticastFilter(dev);
1933 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1934 SETUP_FRAME_LEN, (struct sk_buff *)1);
1935
1936 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1937 outl(POLL_DEMAND, DE4X5_TPD);
1938 netif_trans_update(dev);
1939 }
1940 }
1941}
1942
1943
1944
1945
1946
1947
1948static void
1949SetMulticastFilter(struct net_device *dev)
1950{
1951 struct de4x5_private *lp = netdev_priv(dev);
1952 struct netdev_hw_addr *ha;
1953 u_long iobase = dev->base_addr;
1954 int i, bit, byte;
1955 u16 hashcode;
1956 u32 omr, crc;
1957 char *pa;
1958 unsigned char *addrs;
1959
1960 omr = inl(DE4X5_OMR);
1961 omr &= ~(OMR_PR | OMR_PM);
1962 pa = build_setup_frame(dev, ALL);
1963
1964 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1965 omr |= OMR_PM;
1966 } else if (lp->setup_f == HASH_PERF) {
1967 netdev_for_each_mc_addr(ha, dev) {
1968 crc = ether_crc_le(ETH_ALEN, ha->addr);
1969 hashcode = crc & DE4X5_HASH_BITS;
1970
1971 byte = hashcode >> 3;
1972 bit = 1 << (hashcode & 0x07);
1973
1974 byte <<= 1;
1975 if (byte & 0x02) {
1976 byte -= 1;
1977 }
1978 lp->setup_frame[byte] |= bit;
1979 }
1980 } else {
1981 netdev_for_each_mc_addr(ha, dev) {
1982 addrs = ha->addr;
1983 for (i=0; i<ETH_ALEN; i++) {
1984 *(pa + (i&1)) = *addrs++;
1985 if (i & 0x01) pa += 4;
1986 }
1987 }
1988 }
1989 outl(omr, DE4X5_OMR);
1990}
1991
1992#ifdef CONFIG_EISA
1993
1994static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1995
1996static int de4x5_eisa_probe(struct device *gendev)
1997{
1998 struct eisa_device *edev;
1999 u_long iobase;
2000 u_char irq, regval;
2001 u_short vendor;
2002 u32 cfid;
2003 int status, device;
2004 struct net_device *dev;
2005 struct de4x5_private *lp;
2006
2007 edev = to_eisa_device (gendev);
2008 iobase = edev->base_addr;
2009
2010 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2011 return -EBUSY;
2012
2013 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2014 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2015 status = -EBUSY;
2016 goto release_reg_1;
2017 }
2018
2019 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2020 status = -ENOMEM;
2021 goto release_reg_2;
2022 }
2023 lp = netdev_priv(dev);
2024
2025 cfid = (u32) inl(PCI_CFID);
2026 lp->cfrv = (u_short) inl(PCI_CFRV);
2027 device = (cfid >> 8) & 0x00ffff00;
2028 vendor = (u_short) cfid;
2029
2030
2031 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2032#ifdef CONFIG_ALPHA
2033
2034
2035
2036
2037
2038
2039 outb (ER1_IAM | 1, EISA_REG1);
2040 mdelay (1);
2041
2042
2043 outb (ER1_IAM, EISA_REG1);
2044 mdelay (1);
2045
2046
2047 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2048
2049
2050 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2051#endif
2052 irq = de4x5_irq[(regval >> 1) & 0x03];
2053
2054 if (is_DC2114x) {
2055 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2056 }
2057 lp->chipset = device;
2058 lp->bus = EISA;
2059
2060
2061 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2062 outl(0x00006000, PCI_CFLT);
2063 outl(iobase, PCI_CBIO);
2064
2065 DevicePresent(dev, EISA_APROM);
2066
2067 dev->irq = irq;
2068
2069 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2070 return 0;
2071 }
2072
2073 free_netdev (dev);
2074 release_reg_2:
2075 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2076 release_reg_1:
2077 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2078
2079 return status;
2080}
2081
2082static int de4x5_eisa_remove(struct device *device)
2083{
2084 struct net_device *dev;
2085 u_long iobase;
2086
2087 dev = dev_get_drvdata(device);
2088 iobase = dev->base_addr;
2089
2090 unregister_netdev (dev);
2091 free_netdev (dev);
2092 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2093 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2094
2095 return 0;
2096}
2097
2098static const struct eisa_device_id de4x5_eisa_ids[] = {
2099 { "DEC4250", 0 },
2100 { "" }
2101};
2102MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2103
2104static struct eisa_driver de4x5_eisa_driver = {
2105 .id_table = de4x5_eisa_ids,
2106 .driver = {
2107 .name = "de4x5",
2108 .probe = de4x5_eisa_probe,
2109 .remove = de4x5_eisa_remove,
2110 }
2111};
2112#endif
2113
2114#ifdef CONFIG_PCI
2115
2116
2117
2118
2119
2120
2121
2122static void
2123srom_search(struct net_device *dev, struct pci_dev *pdev)
2124{
2125 u_char pb;
2126 u_short vendor, status;
2127 u_int irq = 0, device;
2128 u_long iobase = 0;
2129 int i, j;
2130 struct de4x5_private *lp = netdev_priv(dev);
2131 struct pci_dev *this_dev;
2132
2133 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2134 vendor = this_dev->vendor;
2135 device = this_dev->device << 8;
2136 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2137
2138
2139 pb = this_dev->bus->number;
2140
2141
2142 lp->device = PCI_SLOT(this_dev->devfn);
2143 lp->bus_num = pb;
2144
2145
2146 if (is_DC2114x) {
2147 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2148 ? DC21142 : DC21143);
2149 }
2150 lp->chipset = device;
2151
2152
2153 iobase = pci_resource_start(this_dev, 0);
2154
2155
2156 irq = this_dev->irq;
2157 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2158
2159
2160 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2161 if (!(status & PCI_COMMAND_IO)) continue;
2162
2163
2164 DevicePresent(dev, DE4X5_APROM);
2165 for (j=0, i=0; i<ETH_ALEN; i++) {
2166 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2167 }
2168 if (j != 0 && j != 6 * 0xff) {
2169 last.chipset = device;
2170 last.bus = pb;
2171 last.irq = irq;
2172 for (i=0; i<ETH_ALEN; i++) {
2173 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2174 }
2175 return;
2176 }
2177 }
2178}
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196static int de4x5_pci_probe(struct pci_dev *pdev,
2197 const struct pci_device_id *ent)
2198{
2199 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2200 u_short vendor, status;
2201 u_int irq = 0, device;
2202 u_long iobase = 0;
2203 int error;
2204 struct net_device *dev;
2205 struct de4x5_private *lp;
2206
2207 dev_num = PCI_SLOT(pdev->devfn);
2208 pb = pdev->bus->number;
2209
2210 if (io) {
2211 pbus = (u_short)(io >> 8);
2212 dnum = (u_short)(io & 0xff);
2213 if ((pbus != pb) || (dnum != dev_num))
2214 return -ENODEV;
2215 }
2216
2217 vendor = pdev->vendor;
2218 device = pdev->device << 8;
2219 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2220 return -ENODEV;
2221
2222
2223 if ((error = pci_enable_device (pdev)))
2224 return error;
2225
2226 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2227 error = -ENOMEM;
2228 goto disable_dev;
2229 }
2230
2231 lp = netdev_priv(dev);
2232 lp->bus = PCI;
2233 lp->bus_num = 0;
2234
2235
2236 if (lp->bus_num != pb) {
2237 lp->bus_num = pb;
2238 srom_search(dev, pdev);
2239 }
2240
2241
2242 lp->cfrv = pdev->revision;
2243
2244
2245 lp->device = dev_num;
2246 lp->bus_num = pb;
2247
2248
2249 if (is_DC2114x) {
2250 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2251 }
2252 lp->chipset = device;
2253
2254
2255 iobase = pci_resource_start(pdev, 0);
2256
2257
2258 irq = pdev->irq;
2259 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2260 error = -ENODEV;
2261 goto free_dev;
2262 }
2263
2264
2265 pci_read_config_word(pdev, PCI_COMMAND, &status);
2266#ifdef __powerpc__
2267 if (!(status & PCI_COMMAND_IO)) {
2268 status |= PCI_COMMAND_IO;
2269 pci_write_config_word(pdev, PCI_COMMAND, status);
2270 pci_read_config_word(pdev, PCI_COMMAND, &status);
2271 }
2272#endif
2273 if (!(status & PCI_COMMAND_IO)) {
2274 error = -ENODEV;
2275 goto free_dev;
2276 }
2277
2278 if (!(status & PCI_COMMAND_MASTER)) {
2279 status |= PCI_COMMAND_MASTER;
2280 pci_write_config_word(pdev, PCI_COMMAND, status);
2281 pci_read_config_word(pdev, PCI_COMMAND, &status);
2282 }
2283 if (!(status & PCI_COMMAND_MASTER)) {
2284 error = -ENODEV;
2285 goto free_dev;
2286 }
2287
2288
2289 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2290 if (timer < 0x60) {
2291 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2292 }
2293
2294 DevicePresent(dev, DE4X5_APROM);
2295
2296 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2297 error = -EBUSY;
2298 goto free_dev;
2299 }
2300
2301 dev->irq = irq;
2302
2303 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2304 goto release;
2305 }
2306
2307 return 0;
2308
2309 release:
2310 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2311 free_dev:
2312 free_netdev (dev);
2313 disable_dev:
2314 pci_disable_device (pdev);
2315 return error;
2316}
2317
2318static void de4x5_pci_remove(struct pci_dev *pdev)
2319{
2320 struct net_device *dev;
2321 u_long iobase;
2322
2323 dev = pci_get_drvdata(pdev);
2324 iobase = dev->base_addr;
2325
2326 unregister_netdev (dev);
2327 free_netdev (dev);
2328 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2329 pci_disable_device (pdev);
2330}
2331
2332static const struct pci_device_id de4x5_pci_tbl[] = {
2333 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2334 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2335 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2336 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2337 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2338 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2339 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2340 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2341 { },
2342};
2343
2344static struct pci_driver de4x5_pci_driver = {
2345 .name = "de4x5",
2346 .id_table = de4x5_pci_tbl,
2347 .probe = de4x5_pci_probe,
2348 .remove = de4x5_pci_remove,
2349};
2350
2351#endif
2352
2353
2354
2355
2356
2357
2358
2359
2360static int
2361autoconf_media(struct net_device *dev)
2362{
2363 struct de4x5_private *lp = netdev_priv(dev);
2364 u_long iobase = dev->base_addr;
2365
2366 disable_ast(dev);
2367
2368 lp->c_media = AUTO;
2369 inl(DE4X5_MFC);
2370 lp->media = INIT;
2371 lp->tcount = 0;
2372
2373 de4x5_ast(&lp->timer);
2374
2375 return lp->media;
2376}
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390static int
2391dc21040_autoconf(struct net_device *dev)
2392{
2393 struct de4x5_private *lp = netdev_priv(dev);
2394 u_long iobase = dev->base_addr;
2395 int next_tick = DE4X5_AUTOSENSE_MS;
2396 s32 imr;
2397
2398 switch (lp->media) {
2399 case INIT:
2400 DISABLE_IRQs;
2401 lp->tx_enable = false;
2402 lp->timeout = -1;
2403 de4x5_save_skbs(dev);
2404 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2405 lp->media = TP;
2406 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2407 lp->media = BNC_AUI;
2408 } else if (lp->autosense == EXT_SIA) {
2409 lp->media = EXT_SIA;
2410 } else {
2411 lp->media = NC;
2412 }
2413 lp->local_state = 0;
2414 next_tick = dc21040_autoconf(dev);
2415 break;
2416
2417 case TP:
2418 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2419 TP_SUSPECT, test_tp);
2420 break;
2421
2422 case TP_SUSPECT:
2423 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2424 break;
2425
2426 case BNC:
2427 case AUI:
2428 case BNC_AUI:
2429 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2430 BNC_AUI_SUSPECT, ping_media);
2431 break;
2432
2433 case BNC_AUI_SUSPECT:
2434 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2435 break;
2436
2437 case EXT_SIA:
2438 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2439 NC, EXT_SIA_SUSPECT, ping_media);
2440 break;
2441
2442 case EXT_SIA_SUSPECT:
2443 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2444 break;
2445
2446 case NC:
2447
2448 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2449 if (lp->media != lp->c_media) {
2450 de4x5_dbg_media(dev);
2451 lp->c_media = lp->media;
2452 }
2453 lp->media = INIT;
2454 lp->tx_enable = false;
2455 break;
2456 }
2457
2458 return next_tick;
2459}
2460
2461static int
2462dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2463 int next_state, int suspect_state,
2464 int (*fn)(struct net_device *, int))
2465{
2466 struct de4x5_private *lp = netdev_priv(dev);
2467 int next_tick = DE4X5_AUTOSENSE_MS;
2468 int linkBad;
2469
2470 switch (lp->local_state) {
2471 case 0:
2472 reset_init_sia(dev, csr13, csr14, csr15);
2473 lp->local_state++;
2474 next_tick = 500;
2475 break;
2476
2477 case 1:
2478 if (!lp->tx_enable) {
2479 linkBad = fn(dev, timeout);
2480 if (linkBad < 0) {
2481 next_tick = linkBad & ~TIMER_CB;
2482 } else {
2483 if (linkBad && (lp->autosense == AUTO)) {
2484 lp->local_state = 0;
2485 lp->media = next_state;
2486 } else {
2487 de4x5_init_connection(dev);
2488 }
2489 }
2490 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2491 lp->media = suspect_state;
2492 next_tick = 3000;
2493 }
2494 break;
2495 }
2496
2497 return next_tick;
2498}
2499
2500static int
2501de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2502 int (*fn)(struct net_device *, int),
2503 int (*asfn)(struct net_device *))
2504{
2505 struct de4x5_private *lp = netdev_priv(dev);
2506 int next_tick = DE4X5_AUTOSENSE_MS;
2507 int linkBad;
2508
2509 switch (lp->local_state) {
2510 case 1:
2511 if (lp->linkOK) {
2512 lp->media = prev_state;
2513 } else {
2514 lp->local_state++;
2515 next_tick = asfn(dev);
2516 }
2517 break;
2518
2519 case 2:
2520 linkBad = fn(dev, timeout);
2521 if (linkBad < 0) {
2522 next_tick = linkBad & ~TIMER_CB;
2523 } else if (!linkBad) {
2524 lp->local_state--;
2525 lp->media = prev_state;
2526 } else {
2527 lp->media = INIT;
2528 lp->tcount++;
2529 }
2530 }
2531
2532 return next_tick;
2533}
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544static int
2545dc21041_autoconf(struct net_device *dev)
2546{
2547 struct de4x5_private *lp = netdev_priv(dev);
2548 u_long iobase = dev->base_addr;
2549 s32 sts, irqs, irq_mask, imr, omr;
2550 int next_tick = DE4X5_AUTOSENSE_MS;
2551
2552 switch (lp->media) {
2553 case INIT:
2554 DISABLE_IRQs;
2555 lp->tx_enable = false;
2556 lp->timeout = -1;
2557 de4x5_save_skbs(dev);
2558 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2559 lp->media = TP;
2560 } else if (lp->autosense == TP) {
2561 lp->media = TP;
2562 } else if (lp->autosense == BNC) {
2563 lp->media = BNC;
2564 } else if (lp->autosense == AUI) {
2565 lp->media = AUI;
2566 } else {
2567 lp->media = NC;
2568 }
2569 lp->local_state = 0;
2570 next_tick = dc21041_autoconf(dev);
2571 break;
2572
2573 case TP_NW:
2574 if (lp->timeout < 0) {
2575 omr = inl(DE4X5_OMR);
2576 outl(omr | OMR_FDX, DE4X5_OMR);
2577 }
2578 irqs = STS_LNF | STS_LNP;
2579 irq_mask = IMR_LFM | IMR_LPM;
2580 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2581 if (sts < 0) {
2582 next_tick = sts & ~TIMER_CB;
2583 } else {
2584 if (sts & STS_LNP) {
2585 lp->media = ANS;
2586 } else {
2587 lp->media = AUI;
2588 }
2589 next_tick = dc21041_autoconf(dev);
2590 }
2591 break;
2592
2593 case ANS:
2594 if (!lp->tx_enable) {
2595 irqs = STS_LNP;
2596 irq_mask = IMR_LPM;
2597 sts = test_ans(dev, irqs, irq_mask, 3000);
2598 if (sts < 0) {
2599 next_tick = sts & ~TIMER_CB;
2600 } else {
2601 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2602 lp->media = TP;
2603 next_tick = dc21041_autoconf(dev);
2604 } else {
2605 lp->local_state = 1;
2606 de4x5_init_connection(dev);
2607 }
2608 }
2609 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2610 lp->media = ANS_SUSPECT;
2611 next_tick = 3000;
2612 }
2613 break;
2614
2615 case ANS_SUSPECT:
2616 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2617 break;
2618
2619 case TP:
2620 if (!lp->tx_enable) {
2621 if (lp->timeout < 0) {
2622 omr = inl(DE4X5_OMR);
2623 outl(omr & ~OMR_FDX, DE4X5_OMR);
2624 }
2625 irqs = STS_LNF | STS_LNP;
2626 irq_mask = IMR_LFM | IMR_LPM;
2627 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2628 if (sts < 0) {
2629 next_tick = sts & ~TIMER_CB;
2630 } else {
2631 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2632 if (inl(DE4X5_SISR) & SISR_NRA) {
2633 lp->media = AUI;
2634 } else {
2635 lp->media = BNC;
2636 }
2637 next_tick = dc21041_autoconf(dev);
2638 } else {
2639 lp->local_state = 1;
2640 de4x5_init_connection(dev);
2641 }
2642 }
2643 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2644 lp->media = TP_SUSPECT;
2645 next_tick = 3000;
2646 }
2647 break;
2648
2649 case TP_SUSPECT:
2650 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2651 break;
2652
2653 case AUI:
2654 if (!lp->tx_enable) {
2655 if (lp->timeout < 0) {
2656 omr = inl(DE4X5_OMR);
2657 outl(omr & ~OMR_FDX, DE4X5_OMR);
2658 }
2659 irqs = 0;
2660 irq_mask = 0;
2661 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2662 if (sts < 0) {
2663 next_tick = sts & ~TIMER_CB;
2664 } else {
2665 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2666 lp->media = BNC;
2667 next_tick = dc21041_autoconf(dev);
2668 } else {
2669 lp->local_state = 1;
2670 de4x5_init_connection(dev);
2671 }
2672 }
2673 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2674 lp->media = AUI_SUSPECT;
2675 next_tick = 3000;
2676 }
2677 break;
2678
2679 case AUI_SUSPECT:
2680 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2681 break;
2682
2683 case BNC:
2684 switch (lp->local_state) {
2685 case 0:
2686 if (lp->timeout < 0) {
2687 omr = inl(DE4X5_OMR);
2688 outl(omr & ~OMR_FDX, DE4X5_OMR);
2689 }
2690 irqs = 0;
2691 irq_mask = 0;
2692 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2693 if (sts < 0) {
2694 next_tick = sts & ~TIMER_CB;
2695 } else {
2696 lp->local_state++;
2697 next_tick = dc21041_autoconf(dev);
2698 }
2699 break;
2700
2701 case 1:
2702 if (!lp->tx_enable) {
2703 if ((sts = ping_media(dev, 3000)) < 0) {
2704 next_tick = sts & ~TIMER_CB;
2705 } else {
2706 if (sts) {
2707 lp->local_state = 0;
2708 lp->media = NC;
2709 } else {
2710 de4x5_init_connection(dev);
2711 }
2712 }
2713 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2714 lp->media = BNC_SUSPECT;
2715 next_tick = 3000;
2716 }
2717 break;
2718 }
2719 break;
2720
2721 case BNC_SUSPECT:
2722 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2723 break;
2724
2725 case NC:
2726 omr = inl(DE4X5_OMR);
2727 outl(omr | OMR_FDX, DE4X5_OMR);
2728 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2729 if (lp->media != lp->c_media) {
2730 de4x5_dbg_media(dev);
2731 lp->c_media = lp->media;
2732 }
2733 lp->media = INIT;
2734 lp->tx_enable = false;
2735 break;
2736 }
2737
2738 return next_tick;
2739}
2740
2741
2742
2743
2744
2745
2746static int
2747dc21140m_autoconf(struct net_device *dev)
2748{
2749 struct de4x5_private *lp = netdev_priv(dev);
2750 int ana, anlpa, cap, cr, slnk, sr;
2751 int next_tick = DE4X5_AUTOSENSE_MS;
2752 u_long imr, omr, iobase = dev->base_addr;
2753
2754 switch(lp->media) {
2755 case INIT:
2756 if (lp->timeout < 0) {
2757 DISABLE_IRQs;
2758 lp->tx_enable = false;
2759 lp->linkOK = 0;
2760 de4x5_save_skbs(dev);
2761 }
2762 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2763 next_tick &= ~TIMER_CB;
2764 } else {
2765 if (lp->useSROM) {
2766 if (srom_map_media(dev) < 0) {
2767 lp->tcount++;
2768 return next_tick;
2769 }
2770 srom_exec(dev, lp->phy[lp->active].gep);
2771 if (lp->infoblock_media == ANS) {
2772 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2773 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2774 }
2775 } else {
2776 lp->tmp = MII_SR_ASSC;
2777 SET_10Mb;
2778 if (lp->autosense == _100Mb) {
2779 lp->media = _100Mb;
2780 } else if (lp->autosense == _10Mb) {
2781 lp->media = _10Mb;
2782 } else if ((lp->autosense == AUTO) &&
2783 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2784 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2785 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2786 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2787 lp->media = ANS;
2788 } else if (lp->autosense == AUTO) {
2789 lp->media = SPD_DET;
2790 } else if (is_spd_100(dev) && is_100_up(dev)) {
2791 lp->media = _100Mb;
2792 } else {
2793 lp->media = NC;
2794 }
2795 }
2796 lp->local_state = 0;
2797 next_tick = dc21140m_autoconf(dev);
2798 }
2799 break;
2800
2801 case ANS:
2802 switch (lp->local_state) {
2803 case 0:
2804 if (lp->timeout < 0) {
2805 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2806 }
2807 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2808 if (cr < 0) {
2809 next_tick = cr & ~TIMER_CB;
2810 } else {
2811 if (cr) {
2812 lp->local_state = 0;
2813 lp->media = SPD_DET;
2814 } else {
2815 lp->local_state++;
2816 }
2817 next_tick = dc21140m_autoconf(dev);
2818 }
2819 break;
2820
2821 case 1:
2822 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2823 next_tick = sr & ~TIMER_CB;
2824 } else {
2825 lp->media = SPD_DET;
2826 lp->local_state = 0;
2827 if (sr) {
2828 lp->tmp = MII_SR_ASSC;
2829 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2830 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2831 if (!(anlpa & MII_ANLPA_RF) &&
2832 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2833 if (cap & MII_ANA_100M) {
2834 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2835 lp->media = _100Mb;
2836 } else if (cap & MII_ANA_10M) {
2837 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2838
2839 lp->media = _10Mb;
2840 }
2841 }
2842 }
2843 next_tick = dc21140m_autoconf(dev);
2844 }
2845 break;
2846 }
2847 break;
2848
2849 case SPD_DET:
2850 if (lp->timeout < 0) {
2851 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2852 (~gep_rd(dev) & GEP_LNP));
2853 SET_100Mb_PDET;
2854 }
2855 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2856 next_tick = slnk & ~TIMER_CB;
2857 } else {
2858 if (is_spd_100(dev) && is_100_up(dev)) {
2859 lp->media = _100Mb;
2860 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2861 lp->media = _10Mb;
2862 } else {
2863 lp->media = NC;
2864 }
2865 next_tick = dc21140m_autoconf(dev);
2866 }
2867 break;
2868
2869 case _100Mb:
2870 next_tick = 3000;
2871 if (!lp->tx_enable) {
2872 SET_100Mb;
2873 de4x5_init_connection(dev);
2874 } else {
2875 if (!lp->linkOK && (lp->autosense == AUTO)) {
2876 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2877 lp->media = INIT;
2878 lp->tcount++;
2879 next_tick = DE4X5_AUTOSENSE_MS;
2880 }
2881 }
2882 }
2883 break;
2884
2885 case BNC:
2886 case AUI:
2887 case _10Mb:
2888 next_tick = 3000;
2889 if (!lp->tx_enable) {
2890 SET_10Mb;
2891 de4x5_init_connection(dev);
2892 } else {
2893 if (!lp->linkOK && (lp->autosense == AUTO)) {
2894 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2895 lp->media = INIT;
2896 lp->tcount++;
2897 next_tick = DE4X5_AUTOSENSE_MS;
2898 }
2899 }
2900 }
2901 break;
2902
2903 case NC:
2904 if (lp->media != lp->c_media) {
2905 de4x5_dbg_media(dev);
2906 lp->c_media = lp->media;
2907 }
2908 lp->media = INIT;
2909 lp->tx_enable = false;
2910 break;
2911 }
2912
2913 return next_tick;
2914}
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930static int
2931dc2114x_autoconf(struct net_device *dev)
2932{
2933 struct de4x5_private *lp = netdev_priv(dev);
2934 u_long iobase = dev->base_addr;
2935 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2936 int next_tick = DE4X5_AUTOSENSE_MS;
2937
2938 switch (lp->media) {
2939 case INIT:
2940 if (lp->timeout < 0) {
2941 DISABLE_IRQs;
2942 lp->tx_enable = false;
2943 lp->linkOK = 0;
2944 lp->timeout = -1;
2945 de4x5_save_skbs(dev);
2946 if (lp->params.autosense & ~AUTO) {
2947 srom_map_media(dev);
2948 if (lp->media != lp->params.autosense) {
2949 lp->tcount++;
2950 lp->media = INIT;
2951 return next_tick;
2952 }
2953 lp->media = INIT;
2954 }
2955 }
2956 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2957 next_tick &= ~TIMER_CB;
2958 } else {
2959 if (lp->autosense == _100Mb) {
2960 lp->media = _100Mb;
2961 } else if (lp->autosense == _10Mb) {
2962 lp->media = _10Mb;
2963 } else if (lp->autosense == TP) {
2964 lp->media = TP;
2965 } else if (lp->autosense == BNC) {
2966 lp->media = BNC;
2967 } else if (lp->autosense == AUI) {
2968 lp->media = AUI;
2969 } else {
2970 lp->media = SPD_DET;
2971 if ((lp->infoblock_media == ANS) &&
2972 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2973 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2974 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2975 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2976 lp->media = ANS;
2977 }
2978 }
2979 lp->local_state = 0;
2980 next_tick = dc2114x_autoconf(dev);
2981 }
2982 break;
2983
2984 case ANS:
2985 switch (lp->local_state) {
2986 case 0:
2987 if (lp->timeout < 0) {
2988 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2989 }
2990 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2991 if (cr < 0) {
2992 next_tick = cr & ~TIMER_CB;
2993 } else {
2994 if (cr) {
2995 lp->local_state = 0;
2996 lp->media = SPD_DET;
2997 } else {
2998 lp->local_state++;
2999 }
3000 next_tick = dc2114x_autoconf(dev);
3001 }
3002 break;
3003
3004 case 1:
3005 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3006 if (sr < 0) {
3007 next_tick = sr & ~TIMER_CB;
3008 } else {
3009 lp->media = SPD_DET;
3010 lp->local_state = 0;
3011 if (sr) {
3012 lp->tmp = MII_SR_ASSC;
3013 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3014 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3015 if (!(anlpa & MII_ANLPA_RF) &&
3016 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3017 if (cap & MII_ANA_100M) {
3018 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3019 lp->media = _100Mb;
3020 } else if (cap & MII_ANA_10M) {
3021 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3022 lp->media = _10Mb;
3023 }
3024 }
3025 }
3026 next_tick = dc2114x_autoconf(dev);
3027 }
3028 break;
3029 }
3030 break;
3031
3032 case AUI:
3033 if (!lp->tx_enable) {
3034 if (lp->timeout < 0) {
3035 omr = inl(DE4X5_OMR);
3036 outl(omr & ~OMR_FDX, DE4X5_OMR);
3037 }
3038 irqs = 0;
3039 irq_mask = 0;
3040 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3041 if (sts < 0) {
3042 next_tick = sts & ~TIMER_CB;
3043 } else {
3044 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3045 lp->media = BNC;
3046 next_tick = dc2114x_autoconf(dev);
3047 } else {
3048 lp->local_state = 1;
3049 de4x5_init_connection(dev);
3050 }
3051 }
3052 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3053 lp->media = AUI_SUSPECT;
3054 next_tick = 3000;
3055 }
3056 break;
3057
3058 case AUI_SUSPECT:
3059 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3060 break;
3061
3062 case BNC:
3063 switch (lp->local_state) {
3064 case 0:
3065 if (lp->timeout < 0) {
3066 omr = inl(DE4X5_OMR);
3067 outl(omr & ~OMR_FDX, DE4X5_OMR);
3068 }
3069 irqs = 0;
3070 irq_mask = 0;
3071 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3072 if (sts < 0) {
3073 next_tick = sts & ~TIMER_CB;
3074 } else {
3075 lp->local_state++;
3076 next_tick = dc2114x_autoconf(dev);
3077 }
3078 break;
3079
3080 case 1:
3081 if (!lp->tx_enable) {
3082 if ((sts = ping_media(dev, 3000)) < 0) {
3083 next_tick = sts & ~TIMER_CB;
3084 } else {
3085 if (sts) {
3086 lp->local_state = 0;
3087 lp->tcount++;
3088 lp->media = INIT;
3089 } else {
3090 de4x5_init_connection(dev);
3091 }
3092 }
3093 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3094 lp->media = BNC_SUSPECT;
3095 next_tick = 3000;
3096 }
3097 break;
3098 }
3099 break;
3100
3101 case BNC_SUSPECT:
3102 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3103 break;
3104
3105 case SPD_DET:
3106 if (srom_map_media(dev) < 0) {
3107 lp->tcount++;
3108 lp->media = INIT;
3109 return next_tick;
3110 }
3111 if (lp->media == _100Mb) {
3112 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3113 lp->media = SPD_DET;
3114 return slnk & ~TIMER_CB;
3115 }
3116 } else {
3117 if (wait_for_link(dev) < 0) {
3118 lp->media = SPD_DET;
3119 return PDET_LINK_WAIT;
3120 }
3121 }
3122 if (lp->media == ANS) {
3123 if (is_spd_100(dev)) {
3124 lp->media = _100Mb;
3125 } else {
3126 lp->media = _10Mb;
3127 }
3128 next_tick = dc2114x_autoconf(dev);
3129 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3130 (((lp->media == _10Mb) || (lp->media == TP) ||
3131 (lp->media == BNC) || (lp->media == AUI)) &&
3132 is_10_up(dev))) {
3133 next_tick = dc2114x_autoconf(dev);
3134 } else {
3135 lp->tcount++;
3136 lp->media = INIT;
3137 }
3138 break;
3139
3140 case _10Mb:
3141 next_tick = 3000;
3142 if (!lp->tx_enable) {
3143 SET_10Mb;
3144 de4x5_init_connection(dev);
3145 } else {
3146 if (!lp->linkOK && (lp->autosense == AUTO)) {
3147 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3148 lp->media = INIT;
3149 lp->tcount++;
3150 next_tick = DE4X5_AUTOSENSE_MS;
3151 }
3152 }
3153 }
3154 break;
3155
3156 case _100Mb:
3157 next_tick = 3000;
3158 if (!lp->tx_enable) {
3159 SET_100Mb;
3160 de4x5_init_connection(dev);
3161 } else {
3162 if (!lp->linkOK && (lp->autosense == AUTO)) {
3163 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3164 lp->media = INIT;
3165 lp->tcount++;
3166 next_tick = DE4X5_AUTOSENSE_MS;
3167 }
3168 }
3169 }
3170 break;
3171
3172 default:
3173 lp->tcount++;
3174printk("Huh?: media:%02x\n", lp->media);
3175 lp->media = INIT;
3176 break;
3177 }
3178
3179 return next_tick;
3180}
3181
3182static int
3183srom_autoconf(struct net_device *dev)
3184{
3185 struct de4x5_private *lp = netdev_priv(dev);
3186
3187 return lp->infoleaf_fn(dev);
3188}
3189
3190
3191
3192
3193
3194
3195static int
3196srom_map_media(struct net_device *dev)
3197{
3198 struct de4x5_private *lp = netdev_priv(dev);
3199
3200 lp->fdx = false;
3201 if (lp->infoblock_media == lp->media)
3202 return 0;
3203
3204 switch(lp->infoblock_media) {
3205 case SROM_10BASETF:
3206 if (!lp->params.fdx) return -1;
3207 lp->fdx = true;
3208 fallthrough;
3209
3210 case SROM_10BASET:
3211 if (lp->params.fdx && !lp->fdx) return -1;
3212 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3213 lp->media = _10Mb;
3214 } else {
3215 lp->media = TP;
3216 }
3217 break;
3218
3219 case SROM_10BASE2:
3220 lp->media = BNC;
3221 break;
3222
3223 case SROM_10BASE5:
3224 lp->media = AUI;
3225 break;
3226
3227 case SROM_100BASETF:
3228 if (!lp->params.fdx) return -1;
3229 lp->fdx = true;
3230 fallthrough;
3231
3232 case SROM_100BASET:
3233 if (lp->params.fdx && !lp->fdx) return -1;
3234 lp->media = _100Mb;
3235 break;
3236
3237 case SROM_100BASET4:
3238 lp->media = _100Mb;
3239 break;
3240
3241 case SROM_100BASEFF:
3242 if (!lp->params.fdx) return -1;
3243 lp->fdx = true;
3244 fallthrough;
3245
3246 case SROM_100BASEF:
3247 if (lp->params.fdx && !lp->fdx) return -1;
3248 lp->media = _100Mb;
3249 break;
3250
3251 case ANS:
3252 lp->media = ANS;
3253 lp->fdx = lp->params.fdx;
3254 break;
3255
3256 default:
3257 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3258 lp->infoblock_media);
3259 return -1;
3260 }
3261
3262 return 0;
3263}
3264
3265static void
3266de4x5_init_connection(struct net_device *dev)
3267{
3268 struct de4x5_private *lp = netdev_priv(dev);
3269 u_long iobase = dev->base_addr;
3270 u_long flags = 0;
3271
3272 if (lp->media != lp->c_media) {
3273 de4x5_dbg_media(dev);
3274 lp->c_media = lp->media;
3275 }
3276
3277 spin_lock_irqsave(&lp->lock, flags);
3278 de4x5_rst_desc_ring(dev);
3279 de4x5_setup_intr(dev);
3280 lp->tx_enable = true;
3281 spin_unlock_irqrestore(&lp->lock, flags);
3282 outl(POLL_DEMAND, DE4X5_TPD);
3283
3284 netif_wake_queue(dev);
3285}
3286
3287
3288
3289
3290
3291
3292static int
3293de4x5_reset_phy(struct net_device *dev)
3294{
3295 struct de4x5_private *lp = netdev_priv(dev);
3296 u_long iobase = dev->base_addr;
3297 int next_tick = 0;
3298
3299 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3300 if (lp->timeout < 0) {
3301 if (lp->useSROM) {
3302 if (lp->phy[lp->active].rst) {
3303 srom_exec(dev, lp->phy[lp->active].rst);
3304 srom_exec(dev, lp->phy[lp->active].rst);
3305 } else if (lp->rst) {
3306 srom_exec(dev, lp->rst);
3307 srom_exec(dev, lp->rst);
3308 }
3309 } else {
3310 PHY_HARD_RESET;
3311 }
3312 if (lp->useMII) {
3313 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3314 }
3315 }
3316 if (lp->useMII) {
3317 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3318 }
3319 } else if (lp->chipset == DC21140) {
3320 PHY_HARD_RESET;
3321 }
3322
3323 return next_tick;
3324}
3325
3326static int
3327test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3328{
3329 struct de4x5_private *lp = netdev_priv(dev);
3330 u_long iobase = dev->base_addr;
3331 s32 sts, csr12;
3332
3333 if (lp->timeout < 0) {
3334 lp->timeout = msec/100;
3335 if (!lp->useSROM) {
3336 reset_init_sia(dev, csr13, csr14, csr15);
3337 }
3338
3339
3340 outl(irq_mask, DE4X5_IMR);
3341
3342
3343 sts = inl(DE4X5_STS);
3344 outl(sts, DE4X5_STS);
3345
3346
3347 if ((lp->chipset == DC21041) || lp->useSROM) {
3348 csr12 = inl(DE4X5_SISR);
3349 outl(csr12, DE4X5_SISR);
3350 }
3351 }
3352
3353 sts = inl(DE4X5_STS) & ~TIMER_CB;
3354
3355 if (!(sts & irqs) && --lp->timeout) {
3356 sts = 100 | TIMER_CB;
3357 } else {
3358 lp->timeout = -1;
3359 }
3360
3361 return sts;
3362}
3363
3364static int
3365test_tp(struct net_device *dev, s32 msec)
3366{
3367 struct de4x5_private *lp = netdev_priv(dev);
3368 u_long iobase = dev->base_addr;
3369 int sisr;
3370
3371 if (lp->timeout < 0) {
3372 lp->timeout = msec/100;
3373 }
3374
3375 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3376
3377 if (sisr && --lp->timeout) {
3378 sisr = 100 | TIMER_CB;
3379 } else {
3380 lp->timeout = -1;
3381 }
3382
3383 return sisr;
3384}
3385
3386
3387
3388
3389
3390
3391#define SAMPLE_INTERVAL 500
3392#define SAMPLE_DELAY 2000
3393static int
3394test_for_100Mb(struct net_device *dev, int msec)
3395{
3396 struct de4x5_private *lp = netdev_priv(dev);
3397 int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
3398
3399 if (lp->timeout < 0) {
3400 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3401 if (msec > SAMPLE_DELAY) {
3402 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3403 gep = SAMPLE_DELAY | TIMER_CB;
3404 return gep;
3405 } else {
3406 lp->timeout = msec/SAMPLE_INTERVAL;
3407 }
3408 }
3409
3410 if (lp->phy[lp->active].id || lp->useSROM) {
3411 gep = is_100_up(dev) | is_spd_100(dev);
3412 } else {
3413 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3414 }
3415 if (!(gep & ret) && --lp->timeout) {
3416 gep = SAMPLE_INTERVAL | TIMER_CB;
3417 } else {
3418 lp->timeout = -1;
3419 }
3420
3421 return gep;
3422}
3423
3424static int
3425wait_for_link(struct net_device *dev)
3426{
3427 struct de4x5_private *lp = netdev_priv(dev);
3428
3429 if (lp->timeout < 0) {
3430 lp->timeout = 1;
3431 }
3432
3433 if (lp->timeout--) {
3434 return TIMER_CB;
3435 } else {
3436 lp->timeout = -1;
3437 }
3438
3439 return 0;
3440}
3441
3442
3443
3444
3445
3446static int
3447test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3448{
3449 struct de4x5_private *lp = netdev_priv(dev);
3450 int test;
3451 u_long iobase = dev->base_addr;
3452
3453 if (lp->timeout < 0) {
3454 lp->timeout = msec/100;
3455 }
3456
3457 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3458 test = (reg ^ (pol ? ~0 : 0)) & mask;
3459
3460 if (test && --lp->timeout) {
3461 reg = 100 | TIMER_CB;
3462 } else {
3463 lp->timeout = -1;
3464 }
3465
3466 return reg;
3467}
3468
3469static int
3470is_spd_100(struct net_device *dev)
3471{
3472 struct de4x5_private *lp = netdev_priv(dev);
3473 u_long iobase = dev->base_addr;
3474 int spd;
3475
3476 if (lp->useMII) {
3477 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3478 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3479 spd &= lp->phy[lp->active].spd.mask;
3480 } else if (!lp->useSROM) {
3481 spd = ((~gep_rd(dev)) & GEP_SLNK);
3482 } else {
3483 if ((lp->ibn == 2) || !lp->asBitValid)
3484 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3485
3486 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3487 (lp->linkOK & ~lp->asBitValid);
3488 }
3489
3490 return spd;
3491}
3492
3493static int
3494is_100_up(struct net_device *dev)
3495{
3496 struct de4x5_private *lp = netdev_priv(dev);
3497 u_long iobase = dev->base_addr;
3498
3499 if (lp->useMII) {
3500
3501 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3502 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3503 } else if (!lp->useSROM) {
3504 return (~gep_rd(dev)) & GEP_SLNK;
3505 } else {
3506 if ((lp->ibn == 2) || !lp->asBitValid)
3507 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3508
3509 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3510 (lp->linkOK & ~lp->asBitValid);
3511 }
3512}
3513
3514static int
3515is_10_up(struct net_device *dev)
3516{
3517 struct de4x5_private *lp = netdev_priv(dev);
3518 u_long iobase = dev->base_addr;
3519
3520 if (lp->useMII) {
3521
3522 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3523 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3524 } else if (!lp->useSROM) {
3525 return (~gep_rd(dev)) & GEP_LNP;
3526 } else {
3527 if ((lp->ibn == 2) || !lp->asBitValid)
3528 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3529 (~inl(DE4X5_SISR)&SISR_LS10):
3530 0;
3531
3532 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3533 (lp->linkOK & ~lp->asBitValid);
3534 }
3535}
3536
3537static int
3538is_anc_capable(struct net_device *dev)
3539{
3540 struct de4x5_private *lp = netdev_priv(dev);
3541 u_long iobase = dev->base_addr;
3542
3543 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3544 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3545 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3546 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3547 } else {
3548 return 0;
3549 }
3550}
3551
3552
3553
3554
3555
3556static int
3557ping_media(struct net_device *dev, int msec)
3558{
3559 struct de4x5_private *lp = netdev_priv(dev);
3560 u_long iobase = dev->base_addr;
3561 int sisr;
3562
3563 if (lp->timeout < 0) {
3564 lp->timeout = msec/100;
3565
3566 lp->tmp = lp->tx_new;
3567 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3568 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3569 outl(POLL_DEMAND, DE4X5_TPD);
3570 }
3571
3572 sisr = inl(DE4X5_SISR);
3573
3574 if ((!(sisr & SISR_NCR)) &&
3575 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3576 (--lp->timeout)) {
3577 sisr = 100 | TIMER_CB;
3578 } else {
3579 if ((!(sisr & SISR_NCR)) &&
3580 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3581 lp->timeout) {
3582 sisr = 0;
3583 } else {
3584 sisr = 1;
3585 }
3586 lp->timeout = -1;
3587 }
3588
3589 return sisr;
3590}
3591
3592
3593
3594
3595
3596
3597static struct sk_buff *
3598de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3599{
3600 struct de4x5_private *lp = netdev_priv(dev);
3601 struct sk_buff *p;
3602
3603#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3604 struct sk_buff *ret;
3605 u_long i=0, tmp;
3606
3607 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3608 if (!p) return NULL;
3609
3610 tmp = virt_to_bus(p->data);
3611 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3612 skb_reserve(p, i);
3613 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3614
3615 ret = lp->rx_skb[index];
3616 lp->rx_skb[index] = p;
3617
3618 if ((u_long) ret > 1) {
3619 skb_put(ret, len);
3620 }
3621
3622 return ret;
3623
3624#else
3625 if (lp->state != OPEN) return (struct sk_buff *)1;
3626
3627 p = netdev_alloc_skb(dev, len + 2);
3628 if (!p) return NULL;
3629
3630 skb_reserve(p, 2);
3631 if (index < lp->rx_old) {
3632 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3633 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
3634 skb_put_data(p, lp->rx_bufs, len - tlen);
3635 } else {
3636 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
3637 }
3638
3639 return p;
3640#endif
3641}
3642
3643static void
3644de4x5_free_rx_buffs(struct net_device *dev)
3645{
3646 struct de4x5_private *lp = netdev_priv(dev);
3647 int i;
3648
3649 for (i=0; i<lp->rxRingSize; i++) {
3650 if ((u_long) lp->rx_skb[i] > 1) {
3651 dev_kfree_skb(lp->rx_skb[i]);
3652 }
3653 lp->rx_ring[i].status = 0;
3654 lp->rx_skb[i] = (struct sk_buff *)1;
3655 }
3656}
3657
3658static void
3659de4x5_free_tx_buffs(struct net_device *dev)
3660{
3661 struct de4x5_private *lp = netdev_priv(dev);
3662 int i;
3663
3664 for (i=0; i<lp->txRingSize; i++) {
3665 if (lp->tx_skb[i])
3666 de4x5_free_tx_buff(lp, i);
3667 lp->tx_ring[i].status = 0;
3668 }
3669
3670
3671 __skb_queue_purge(&lp->cache.queue);
3672}
3673
3674
3675
3676
3677
3678
3679
3680
3681static void
3682de4x5_save_skbs(struct net_device *dev)
3683{
3684 struct de4x5_private *lp = netdev_priv(dev);
3685 u_long iobase = dev->base_addr;
3686 s32 omr;
3687
3688 if (!lp->cache.save_cnt) {
3689 STOP_DE4X5;
3690 de4x5_tx(dev);
3691 de4x5_free_tx_buffs(dev);
3692 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3693 de4x5_sw_reset(dev);
3694 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3695 lp->cache.save_cnt++;
3696 START_DE4X5;
3697 }
3698}
3699
3700static void
3701de4x5_rst_desc_ring(struct net_device *dev)
3702{
3703 struct de4x5_private *lp = netdev_priv(dev);
3704 u_long iobase = dev->base_addr;
3705 int i;
3706 s32 omr;
3707
3708 if (lp->cache.save_cnt) {
3709 STOP_DE4X5;
3710 outl(lp->dma_rings, DE4X5_RRBA);
3711 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3712 DE4X5_TRBA);
3713
3714 lp->rx_new = lp->rx_old = 0;
3715 lp->tx_new = lp->tx_old = 0;
3716
3717 for (i = 0; i < lp->rxRingSize; i++) {
3718 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3719 }
3720
3721 for (i = 0; i < lp->txRingSize; i++) {
3722 lp->tx_ring[i].status = cpu_to_le32(0);
3723 }
3724
3725 barrier();
3726 lp->cache.save_cnt--;
3727 START_DE4X5;
3728 }
3729}
3730
3731static void
3732de4x5_cache_state(struct net_device *dev, int flag)
3733{
3734 struct de4x5_private *lp = netdev_priv(dev);
3735 u_long iobase = dev->base_addr;
3736
3737 switch(flag) {
3738 case DE4X5_SAVE_STATE:
3739 lp->cache.csr0 = inl(DE4X5_BMR);
3740 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3741 lp->cache.csr7 = inl(DE4X5_IMR);
3742 break;
3743
3744 case DE4X5_RESTORE_STATE:
3745 outl(lp->cache.csr0, DE4X5_BMR);
3746 outl(lp->cache.csr6, DE4X5_OMR);
3747 outl(lp->cache.csr7, DE4X5_IMR);
3748 if (lp->chipset == DC21140) {
3749 gep_wr(lp->cache.gepc, dev);
3750 gep_wr(lp->cache.gep, dev);
3751 } else {
3752 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3753 lp->cache.csr15);
3754 }
3755 break;
3756 }
3757}
3758
3759static void
3760de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3761{
3762 struct de4x5_private *lp = netdev_priv(dev);
3763
3764 __skb_queue_tail(&lp->cache.queue, skb);
3765}
3766
3767static void
3768de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3769{
3770 struct de4x5_private *lp = netdev_priv(dev);
3771
3772 __skb_queue_head(&lp->cache.queue, skb);
3773}
3774
3775static struct sk_buff *
3776de4x5_get_cache(struct net_device *dev)
3777{
3778 struct de4x5_private *lp = netdev_priv(dev);
3779
3780 return __skb_dequeue(&lp->cache.queue);
3781}
3782
3783
3784
3785
3786
3787static int
3788test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3789{
3790 struct de4x5_private *lp = netdev_priv(dev);
3791 u_long iobase = dev->base_addr;
3792 s32 sts, ans;
3793
3794 if (lp->timeout < 0) {
3795 lp->timeout = msec/100;
3796 outl(irq_mask, DE4X5_IMR);
3797
3798
3799 sts = inl(DE4X5_STS);
3800 outl(sts, DE4X5_STS);
3801 }
3802
3803 ans = inl(DE4X5_SISR) & SISR_ANS;
3804 sts = inl(DE4X5_STS) & ~TIMER_CB;
3805
3806 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3807 sts = 100 | TIMER_CB;
3808 } else {
3809 lp->timeout = -1;
3810 }
3811
3812 return sts;
3813}
3814
3815static void
3816de4x5_setup_intr(struct net_device *dev)
3817{
3818 struct de4x5_private *lp = netdev_priv(dev);
3819 u_long iobase = dev->base_addr;
3820 s32 imr, sts;
3821
3822 if (inl(DE4X5_OMR) & OMR_SR) {
3823 imr = 0;
3824 UNMASK_IRQs;
3825 sts = inl(DE4X5_STS);
3826 outl(sts, DE4X5_STS);
3827 ENABLE_IRQs;
3828 }
3829}
3830
3831
3832
3833
3834static void
3835reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3836{
3837 struct de4x5_private *lp = netdev_priv(dev);
3838 u_long iobase = dev->base_addr;
3839
3840 RESET_SIA;
3841 if (lp->useSROM) {
3842 if (lp->ibn == 3) {
3843 srom_exec(dev, lp->phy[lp->active].rst);
3844 srom_exec(dev, lp->phy[lp->active].gep);
3845 outl(1, DE4X5_SICR);
3846 return;
3847 } else {
3848 csr15 = lp->cache.csr15;
3849 csr14 = lp->cache.csr14;
3850 csr13 = lp->cache.csr13;
3851 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3852 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3853 }
3854 } else {
3855 outl(csr15, DE4X5_SIGR);
3856 }
3857 outl(csr14, DE4X5_STRR);
3858 outl(csr13, DE4X5_SICR);
3859
3860 mdelay(10);
3861}
3862
3863
3864
3865
3866static void
3867create_packet(struct net_device *dev, char *frame, int len)
3868{
3869 int i;
3870 char *buf = frame;
3871
3872 for (i=0; i<ETH_ALEN; i++) {
3873 *buf++ = dev->dev_addr[i];
3874 }
3875 for (i=0; i<ETH_ALEN; i++) {
3876 *buf++ = dev->dev_addr[i];
3877 }
3878
3879 *buf++ = 0;
3880 *buf++ = 1;
3881}
3882
3883
3884
3885
3886static int
3887EISA_signature(char *name, struct device *device)
3888{
3889 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3890 struct eisa_device *edev;
3891
3892 *name = '\0';
3893 edev = to_eisa_device (device);
3894 i = edev->id.driver_data;
3895
3896 if (i >= 0 && i < siglen) {
3897 strcpy (name, de4x5_signatures[i]);
3898 status = 1;
3899 }
3900
3901 return status;
3902}
3903
3904
3905
3906
3907static void
3908PCI_signature(char *name, struct de4x5_private *lp)
3909{
3910 int i, siglen = ARRAY_SIZE(de4x5_signatures);
3911
3912 if (lp->chipset == DC21040) {
3913 strcpy(name, "DE434/5");
3914 return;
3915 } else {
3916 int tmp = *((char *)&lp->srom + 19) * 3;
3917 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3918 }
3919 name[8] = '\0';
3920 for (i=0; i<siglen; i++) {
3921 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3922 }
3923 if (i == siglen) {
3924 if (dec_only) {
3925 *name = '\0';
3926 } else {
3927 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3928 ((lp->chipset == DC21041) ? "DC21041" :
3929 ((lp->chipset == DC21140) ? "DC21140" :
3930 ((lp->chipset == DC21142) ? "DC21142" :
3931 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3932 )))))));
3933 }
3934 if (lp->chipset != DC21041) {
3935 lp->useSROM = true;
3936 }
3937 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3938 lp->useSROM = true;
3939 }
3940}
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950static void
3951DevicePresent(struct net_device *dev, u_long aprom_addr)
3952{
3953 int i, j=0;
3954 struct de4x5_private *lp = netdev_priv(dev);
3955
3956 if (lp->chipset == DC21040) {
3957 if (lp->bus == EISA) {
3958 enet_addr_rst(aprom_addr);
3959 } else {
3960 outl(0, aprom_addr);
3961 }
3962 } else {
3963 u_short tmp;
3964 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3965 for (i=0; i<(ETH_ALEN>>1); i++) {
3966 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3967 j += tmp;
3968 *p = cpu_to_le16(tmp);
3969 }
3970 if (j == 0 || j == 3 * 0xffff) {
3971
3972 return;
3973 }
3974
3975 p = (__le16 *)&lp->srom;
3976 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3977 tmp = srom_rd(aprom_addr, i);
3978 *p++ = cpu_to_le16(tmp);
3979 }
3980 de4x5_dbg_srom(&lp->srom);
3981 }
3982}
3983
3984
3985
3986
3987
3988
3989static void
3990enet_addr_rst(u_long aprom_addr)
3991{
3992 union {
3993 struct {
3994 u32 a;
3995 u32 b;
3996 } llsig;
3997 char Sig[sizeof(u32) << 1];
3998 } dev;
3999 short sigLength=0;
4000 s8 data;
4001 int i, j;
4002
4003 dev.llsig.a = ETH_PROM_SIG;
4004 dev.llsig.b = ETH_PROM_SIG;
4005 sigLength = sizeof(u32) << 1;
4006
4007 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4008 data = inb(aprom_addr);
4009 if (dev.Sig[j] == data) {
4010 j++;
4011 } else {
4012 if (data == dev.Sig[0]) {
4013 j=1;
4014 } else {
4015 j=0;
4016 }
4017 }
4018 }
4019}
4020
4021
4022
4023
4024
4025
4026
4027static int
4028get_hw_addr(struct net_device *dev)
4029{
4030 u_long iobase = dev->base_addr;
4031 int broken, i, k, tmp, status = 0;
4032 u_short j,chksum;
4033 struct de4x5_private *lp = netdev_priv(dev);
4034
4035 broken = de4x5_bad_srom(lp);
4036
4037 for (i=0,k=0,j=0;j<3;j++) {
4038 k <<= 1;
4039 if (k > 0xffff) k-=0xffff;
4040
4041 if (lp->bus == PCI) {
4042 if (lp->chipset == DC21040) {
4043 while ((tmp = inl(DE4X5_APROM)) < 0);
4044 k += (u_char) tmp;
4045 dev->dev_addr[i++] = (u_char) tmp;
4046 while ((tmp = inl(DE4X5_APROM)) < 0);
4047 k += (u_short) (tmp << 8);
4048 dev->dev_addr[i++] = (u_char) tmp;
4049 } else if (!broken) {
4050 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4051 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4052 } else if ((broken == SMC) || (broken == ACCTON)) {
4053 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4054 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4055 }
4056 } else {
4057 k += (u_char) (tmp = inb(EISA_APROM));
4058 dev->dev_addr[i++] = (u_char) tmp;
4059 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4060 dev->dev_addr[i++] = (u_char) tmp;
4061 }
4062
4063 if (k > 0xffff) k-=0xffff;
4064 }
4065 if (k == 0xffff) k=0;
4066
4067 if (lp->bus == PCI) {
4068 if (lp->chipset == DC21040) {
4069 while ((tmp = inl(DE4X5_APROM)) < 0);
4070 chksum = (u_char) tmp;
4071 while ((tmp = inl(DE4X5_APROM)) < 0);
4072 chksum |= (u_short) (tmp << 8);
4073 if ((k != chksum) && (dec_only)) status = -1;
4074 }
4075 } else {
4076 chksum = (u_char) inb(EISA_APROM);
4077 chksum |= (u_short) (inb(EISA_APROM) << 8);
4078 if ((k != chksum) && (dec_only)) status = -1;
4079 }
4080
4081
4082 srom_repair(dev, broken);
4083
4084#ifdef CONFIG_PPC_PMAC
4085
4086
4087
4088
4089 if ( machine_is(powermac) &&
4090 (dev->dev_addr[0] == 0) &&
4091 (dev->dev_addr[1] == 0xa0) )
4092 {
4093 for (i = 0; i < ETH_ALEN; ++i)
4094 {
4095 int x = dev->dev_addr[i];
4096 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4097 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4098 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4099 }
4100 }
4101#endif
4102
4103
4104 status = test_bad_enet(dev, status);
4105
4106 return status;
4107}
4108
4109
4110
4111
4112static int
4113de4x5_bad_srom(struct de4x5_private *lp)
4114{
4115 int i, status = 0;
4116
4117 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4118 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4119 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4120 if (i == 0) {
4121 status = SMC;
4122 } else if (i == 1) {
4123 status = ACCTON;
4124 }
4125 break;
4126 }
4127 }
4128
4129 return status;
4130}
4131
4132static void
4133srom_repair(struct net_device *dev, int card)
4134{
4135 struct de4x5_private *lp = netdev_priv(dev);
4136
4137 switch(card) {
4138 case SMC:
4139 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4140 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4141 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4142 lp->useSROM = true;
4143 break;
4144 }
4145}
4146
4147
4148
4149
4150
4151static int
4152test_bad_enet(struct net_device *dev, int status)
4153{
4154 struct de4x5_private *lp = netdev_priv(dev);
4155 int i, tmp;
4156
4157 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4158 if ((tmp == 0) || (tmp == 0x5fa)) {
4159 if ((lp->chipset == last.chipset) &&
4160 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4161 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4162 for (i=ETH_ALEN-1; i>2; --i) {
4163 dev->dev_addr[i] += 1;
4164 if (dev->dev_addr[i] != 0) break;
4165 }
4166 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4167 if (!an_exception(lp)) {
4168 dev->irq = last.irq;
4169 }
4170
4171 status = 0;
4172 }
4173 } else if (!status) {
4174 last.chipset = lp->chipset;
4175 last.bus = lp->bus_num;
4176 last.irq = dev->irq;
4177 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4178 }
4179
4180 return status;
4181}
4182
4183
4184
4185
4186static int
4187an_exception(struct de4x5_private *lp)
4188{
4189 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4190 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4191 return -1;
4192 }
4193
4194 return 0;
4195}
4196
4197
4198
4199
4200static short
4201srom_rd(u_long addr, u_char offset)
4202{
4203 sendto_srom(SROM_RD | SROM_SR, addr);
4204
4205 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4206 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4207 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4208
4209 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4210}
4211
4212static void
4213srom_latch(u_int command, u_long addr)
4214{
4215 sendto_srom(command, addr);
4216 sendto_srom(command | DT_CLK, addr);
4217 sendto_srom(command, addr);
4218}
4219
4220static void
4221srom_command(u_int command, u_long addr)
4222{
4223 srom_latch(command, addr);
4224 srom_latch(command, addr);
4225 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4226}
4227
4228static void
4229srom_address(u_int command, u_long addr, u_char offset)
4230{
4231 int i, a;
4232
4233 a = offset << 2;
4234 for (i=0; i<6; i++, a <<= 1) {
4235 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4236 }
4237 udelay(1);
4238
4239 i = (getfrom_srom(addr) >> 3) & 0x01;
4240}
4241
4242static short
4243srom_data(u_int command, u_long addr)
4244{
4245 int i;
4246 short word = 0;
4247 s32 tmp;
4248
4249 for (i=0; i<16; i++) {
4250 sendto_srom(command | DT_CLK, addr);
4251 tmp = getfrom_srom(addr);
4252 sendto_srom(command, addr);
4253
4254 word = (word << 1) | ((tmp >> 3) & 0x01);
4255 }
4256
4257 sendto_srom(command & 0x0000ff00, addr);
4258
4259 return word;
4260}
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276static void
4277sendto_srom(u_int command, u_long addr)
4278{
4279 outl(command, addr);
4280 udelay(1);
4281}
4282
4283static int
4284getfrom_srom(u_long addr)
4285{
4286 s32 tmp;
4287
4288 tmp = inl(addr);
4289 udelay(1);
4290
4291 return tmp;
4292}
4293
4294static int
4295srom_infoleaf_info(struct net_device *dev)
4296{
4297 struct de4x5_private *lp = netdev_priv(dev);
4298 int i, count;
4299 u_char *p;
4300
4301
4302 for (i=0; i<INFOLEAF_SIZE; i++) {
4303 if (lp->chipset == infoleaf_array[i].chipset) break;
4304 }
4305 if (i == INFOLEAF_SIZE) {
4306 lp->useSROM = false;
4307 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4308 dev->name);
4309 return -ENXIO;
4310 }
4311
4312 lp->infoleaf_fn = infoleaf_array[i].fn;
4313
4314
4315 count = *((u_char *)&lp->srom + 19);
4316 p = (u_char *)&lp->srom + 26;
4317
4318 if (count > 1) {
4319 for (i=count; i; --i, p+=3) {
4320 if (lp->device == *p) break;
4321 }
4322 if (i == 0) {
4323 lp->useSROM = false;
4324 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4325 dev->name, lp->device);
4326 return -ENXIO;
4327 }
4328 }
4329
4330 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4331
4332 return 0;
4333}
4334
4335
4336
4337
4338
4339
4340
4341
4342static void
4343srom_init(struct net_device *dev)
4344{
4345 struct de4x5_private *lp = netdev_priv(dev);
4346 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4347 u_char count;
4348
4349 p+=2;
4350 if (lp->chipset == DC21140) {
4351 lp->cache.gepc = (*p++ | GEP_CTRL);
4352 gep_wr(lp->cache.gepc, dev);
4353 }
4354
4355
4356 count = *p++;
4357
4358
4359 for (;count; --count) {
4360 if (*p < 128) {
4361 p += COMPACT_LEN;
4362 } else if (*(p+1) == 5) {
4363 type5_infoblock(dev, 1, p);
4364 p += ((*p & BLOCK_LEN) + 1);
4365 } else if (*(p+1) == 4) {
4366 p += ((*p & BLOCK_LEN) + 1);
4367 } else if (*(p+1) == 3) {
4368 type3_infoblock(dev, 1, p);
4369 p += ((*p & BLOCK_LEN) + 1);
4370 } else if (*(p+1) == 2) {
4371 p += ((*p & BLOCK_LEN) + 1);
4372 } else if (*(p+1) == 1) {
4373 type1_infoblock(dev, 1, p);
4374 p += ((*p & BLOCK_LEN) + 1);
4375 } else {
4376 p += ((*p & BLOCK_LEN) + 1);
4377 }
4378 }
4379}
4380
4381
4382
4383
4384
4385static void
4386srom_exec(struct net_device *dev, u_char *p)
4387{
4388 struct de4x5_private *lp = netdev_priv(dev);
4389 u_long iobase = dev->base_addr;
4390 u_char count = (p ? *p++ : 0);
4391 u_short *w = (u_short *)p;
4392
4393 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4394
4395 if (lp->chipset != DC21140) RESET_SIA;
4396
4397 while (count--) {
4398 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4399 *p++ : get_unaligned_le16(w++)), dev);
4400 mdelay(2);
4401 }
4402
4403 if (lp->chipset != DC21140) {
4404 outl(lp->cache.csr14, DE4X5_STRR);
4405 outl(lp->cache.csr13, DE4X5_SICR);
4406 }
4407}
4408
4409
4410
4411
4412
4413
4414static int
4415dc21041_infoleaf(struct net_device *dev)
4416{
4417 return DE4X5_AUTOSENSE_MS;
4418}
4419
4420static int
4421dc21140_infoleaf(struct net_device *dev)
4422{
4423 struct de4x5_private *lp = netdev_priv(dev);
4424 u_char count = 0;
4425 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4426 int next_tick = DE4X5_AUTOSENSE_MS;
4427
4428
4429 p+=2;
4430
4431
4432 lp->cache.gepc = (*p++ | GEP_CTRL);
4433
4434
4435 count = *p++;
4436
4437
4438 if (*p < 128) {
4439 next_tick = dc_infoblock[COMPACT](dev, count, p);
4440 } else {
4441 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4442 }
4443
4444 if (lp->tcount == count) {
4445 lp->media = NC;
4446 if (lp->media != lp->c_media) {
4447 de4x5_dbg_media(dev);
4448 lp->c_media = lp->media;
4449 }
4450 lp->media = INIT;
4451 lp->tcount = 0;
4452 lp->tx_enable = false;
4453 }
4454
4455 return next_tick & ~TIMER_CB;
4456}
4457
4458static int
4459dc21142_infoleaf(struct net_device *dev)
4460{
4461 struct de4x5_private *lp = netdev_priv(dev);
4462 u_char count = 0;
4463 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4464 int next_tick = DE4X5_AUTOSENSE_MS;
4465
4466
4467 p+=2;
4468
4469
4470 count = *p++;
4471
4472
4473 if (*p < 128) {
4474 next_tick = dc_infoblock[COMPACT](dev, count, p);
4475 } else {
4476 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4477 }
4478
4479 if (lp->tcount == count) {
4480 lp->media = NC;
4481 if (lp->media != lp->c_media) {
4482 de4x5_dbg_media(dev);
4483 lp->c_media = lp->media;
4484 }
4485 lp->media = INIT;
4486 lp->tcount = 0;
4487 lp->tx_enable = false;
4488 }
4489
4490 return next_tick & ~TIMER_CB;
4491}
4492
4493static int
4494dc21143_infoleaf(struct net_device *dev)
4495{
4496 struct de4x5_private *lp = netdev_priv(dev);
4497 u_char count = 0;
4498 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4499 int next_tick = DE4X5_AUTOSENSE_MS;
4500
4501
4502 p+=2;
4503
4504
4505 count = *p++;
4506
4507
4508 if (*p < 128) {
4509 next_tick = dc_infoblock[COMPACT](dev, count, p);
4510 } else {
4511 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4512 }
4513 if (lp->tcount == count) {
4514 lp->media = NC;
4515 if (lp->media != lp->c_media) {
4516 de4x5_dbg_media(dev);
4517 lp->c_media = lp->media;
4518 }
4519 lp->media = INIT;
4520 lp->tcount = 0;
4521 lp->tx_enable = false;
4522 }
4523
4524 return next_tick & ~TIMER_CB;
4525}
4526
4527
4528
4529
4530
4531static int
4532compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4533{
4534 struct de4x5_private *lp = netdev_priv(dev);
4535 u_char flags, csr6;
4536
4537
4538 if (--count > lp->tcount) {
4539 if (*(p+COMPACT_LEN) < 128) {
4540 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4541 } else {
4542 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4543 }
4544 }
4545
4546 if ((lp->media == INIT) && (lp->timeout < 0)) {
4547 lp->ibn = COMPACT;
4548 lp->active = 0;
4549 gep_wr(lp->cache.gepc, dev);
4550 lp->infoblock_media = (*p++) & COMPACT_MC;
4551 lp->cache.gep = *p++;
4552 csr6 = *p++;
4553 flags = *p++;
4554
4555 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4556 lp->defMedium = (flags & 0x40) ? -1 : 0;
4557 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4558 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4559 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4560 lp->useMII = false;
4561
4562 de4x5_switch_mac_port(dev);
4563 }
4564
4565 return dc21140m_autoconf(dev);
4566}
4567
4568
4569
4570
4571static int
4572type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4573{
4574 struct de4x5_private *lp = netdev_priv(dev);
4575 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4576
4577
4578 if (--count > lp->tcount) {
4579 if (*(p+len) < 128) {
4580 return dc_infoblock[COMPACT](dev, count, p+len);
4581 } else {
4582 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4583 }
4584 }
4585
4586 if ((lp->media == INIT) && (lp->timeout < 0)) {
4587 lp->ibn = 0;
4588 lp->active = 0;
4589 gep_wr(lp->cache.gepc, dev);
4590 p+=2;
4591 lp->infoblock_media = (*p++) & BLOCK0_MC;
4592 lp->cache.gep = *p++;
4593 csr6 = *p++;
4594 flags = *p++;
4595
4596 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4597 lp->defMedium = (flags & 0x40) ? -1 : 0;
4598 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4599 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4600 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4601 lp->useMII = false;
4602
4603 de4x5_switch_mac_port(dev);
4604 }
4605
4606 return dc21140m_autoconf(dev);
4607}
4608
4609
4610
4611static int
4612type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4613{
4614 struct de4x5_private *lp = netdev_priv(dev);
4615 u_char len = (*p & BLOCK_LEN)+1;
4616
4617
4618 if (--count > lp->tcount) {
4619 if (*(p+len) < 128) {
4620 return dc_infoblock[COMPACT](dev, count, p+len);
4621 } else {
4622 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4623 }
4624 }
4625
4626 p += 2;
4627 if (lp->state == INITIALISED) {
4628 lp->ibn = 1;
4629 lp->active = *p++;
4630 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4631 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4632 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4633 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4634 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4635 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4636 return 0;
4637 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4638 lp->ibn = 1;
4639 lp->active = *p;
4640 lp->infoblock_csr6 = OMR_MII_100;
4641 lp->useMII = true;
4642 lp->infoblock_media = ANS;
4643
4644 de4x5_switch_mac_port(dev);
4645 }
4646
4647 return dc21140m_autoconf(dev);
4648}
4649
4650static int
4651type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4652{
4653 struct de4x5_private *lp = netdev_priv(dev);
4654 u_char len = (*p & BLOCK_LEN)+1;
4655
4656
4657 if (--count > lp->tcount) {
4658 if (*(p+len) < 128) {
4659 return dc_infoblock[COMPACT](dev, count, p+len);
4660 } else {
4661 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4662 }
4663 }
4664
4665 if ((lp->media == INIT) && (lp->timeout < 0)) {
4666 lp->ibn = 2;
4667 lp->active = 0;
4668 p += 2;
4669 lp->infoblock_media = (*p) & MEDIA_CODE;
4670
4671 if ((*p++) & EXT_FIELD) {
4672 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4673 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4674 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4675 } else {
4676 lp->cache.csr13 = CSR13;
4677 lp->cache.csr14 = CSR14;
4678 lp->cache.csr15 = CSR15;
4679 }
4680 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4681 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4682 lp->infoblock_csr6 = OMR_SIA;
4683 lp->useMII = false;
4684
4685 de4x5_switch_mac_port(dev);
4686 }
4687
4688 return dc2114x_autoconf(dev);
4689}
4690
4691static int
4692type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4693{
4694 struct de4x5_private *lp = netdev_priv(dev);
4695 u_char len = (*p & BLOCK_LEN)+1;
4696
4697
4698 if (--count > lp->tcount) {
4699 if (*(p+len) < 128) {
4700 return dc_infoblock[COMPACT](dev, count, p+len);
4701 } else {
4702 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4703 }
4704 }
4705
4706 p += 2;
4707 if (lp->state == INITIALISED) {
4708 lp->ibn = 3;
4709 lp->active = *p++;
4710 if (MOTO_SROM_BUG) lp->active = 0;
4711 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4712 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4713 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4714 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4715 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4716 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4717 lp->phy[lp->active].mci = *p;
4718 return 0;
4719 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4720 lp->ibn = 3;
4721 lp->active = *p;
4722 if (MOTO_SROM_BUG) lp->active = 0;
4723 lp->infoblock_csr6 = OMR_MII_100;
4724 lp->useMII = true;
4725 lp->infoblock_media = ANS;
4726
4727 de4x5_switch_mac_port(dev);
4728 }
4729
4730 return dc2114x_autoconf(dev);
4731}
4732
4733static int
4734type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4735{
4736 struct de4x5_private *lp = netdev_priv(dev);
4737 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4738
4739
4740 if (--count > lp->tcount) {
4741 if (*(p+len) < 128) {
4742 return dc_infoblock[COMPACT](dev, count, p+len);
4743 } else {
4744 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4745 }
4746 }
4747
4748 if ((lp->media == INIT) && (lp->timeout < 0)) {
4749 lp->ibn = 4;
4750 lp->active = 0;
4751 p+=2;
4752 lp->infoblock_media = (*p++) & MEDIA_CODE;
4753 lp->cache.csr13 = CSR13;
4754 lp->cache.csr14 = CSR14;
4755 lp->cache.csr15 = CSR15;
4756 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4757 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4758 csr6 = *p++;
4759 flags = *p++;
4760
4761 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4762 lp->defMedium = (flags & 0x40) ? -1 : 0;
4763 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4764 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4765 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4766 lp->useMII = false;
4767
4768 de4x5_switch_mac_port(dev);
4769 }
4770
4771 return dc2114x_autoconf(dev);
4772}
4773
4774
4775
4776
4777
4778static int
4779type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4780{
4781 struct de4x5_private *lp = netdev_priv(dev);
4782 u_char len = (*p & BLOCK_LEN)+1;
4783
4784
4785 if (--count > lp->tcount) {
4786 if (*(p+len) < 128) {
4787 return dc_infoblock[COMPACT](dev, count, p+len);
4788 } else {
4789 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4790 }
4791 }
4792
4793
4794 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4795 p+=2;
4796 lp->rst = p;
4797 srom_exec(dev, lp->rst);
4798 }
4799
4800 return DE4X5_AUTOSENSE_MS;
4801}
4802
4803
4804
4805
4806
4807static int
4808mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4809{
4810 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4811 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4812 mii_wdata(MII_STRD, 4, ioaddr);
4813 mii_address(phyaddr, ioaddr);
4814 mii_address(phyreg, ioaddr);
4815 mii_ta(MII_STRD, ioaddr);
4816
4817 return mii_rdata(ioaddr);
4818}
4819
4820static void
4821mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4822{
4823 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4824 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4825 mii_wdata(MII_STWR, 4, ioaddr);
4826 mii_address(phyaddr, ioaddr);
4827 mii_address(phyreg, ioaddr);
4828 mii_ta(MII_STWR, ioaddr);
4829 data = mii_swap(data, 16);
4830 mii_wdata(data, 16, ioaddr);
4831}
4832
4833static int
4834mii_rdata(u_long ioaddr)
4835{
4836 int i;
4837 s32 tmp = 0;
4838
4839 for (i=0; i<16; i++) {
4840 tmp <<= 1;
4841 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4842 }
4843
4844 return tmp;
4845}
4846
4847static void
4848mii_wdata(int data, int len, u_long ioaddr)
4849{
4850 int i;
4851
4852 for (i=0; i<len; i++) {
4853 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4854 data >>= 1;
4855 }
4856}
4857
4858static void
4859mii_address(u_char addr, u_long ioaddr)
4860{
4861 int i;
4862
4863 addr = mii_swap(addr, 5);
4864 for (i=0; i<5; i++) {
4865 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4866 addr >>= 1;
4867 }
4868}
4869
4870static void
4871mii_ta(u_long rw, u_long ioaddr)
4872{
4873 if (rw == MII_STWR) {
4874 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4875 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4876 } else {
4877 getfrom_mii(MII_MRD | MII_RD, ioaddr);
4878 }
4879}
4880
4881static int
4882mii_swap(int data, int len)
4883{
4884 int i, tmp = 0;
4885
4886 for (i=0; i<len; i++) {
4887 tmp <<= 1;
4888 tmp |= (data & 1);
4889 data >>= 1;
4890 }
4891
4892 return tmp;
4893}
4894
4895static void
4896sendto_mii(u32 command, int data, u_long ioaddr)
4897{
4898 u32 j;
4899
4900 j = (data & 1) << 17;
4901 outl(command | j, ioaddr);
4902 udelay(1);
4903 outl(command | MII_MDC | j, ioaddr);
4904 udelay(1);
4905}
4906
4907static int
4908getfrom_mii(u32 command, u_long ioaddr)
4909{
4910 outl(command, ioaddr);
4911 udelay(1);
4912 outl(command | MII_MDC, ioaddr);
4913 udelay(1);
4914
4915 return (inl(ioaddr) >> 19) & 1;
4916}
4917
4918
4919
4920
4921static int
4922mii_get_oui(u_char phyaddr, u_long ioaddr)
4923{
4924
4925
4926
4927
4928
4929
4930 int r2;
4931
4932
4933 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4934 mii_rd(MII_ID1, phyaddr, ioaddr);
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962 return r2;
4963}
4964
4965
4966
4967
4968static int
4969mii_get_phy(struct net_device *dev)
4970{
4971 struct de4x5_private *lp = netdev_priv(dev);
4972 u_long iobase = dev->base_addr;
4973 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4974 int id;
4975
4976 lp->active = 0;
4977 lp->useMII = true;
4978
4979
4980 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4981 lp->phy[lp->active].addr = i;
4982 if (i==0) n++;
4983 while (de4x5_reset_phy(dev)<0) udelay(100);
4984 id = mii_get_oui(i, DE4X5_MII);
4985 if ((id == 0) || (id == 65535)) continue;
4986 for (j=0; j<limit; j++) {
4987 if (id != phy_info[j].id) continue;
4988 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4989 if (k < DE4X5_MAX_PHY) {
4990 memcpy((char *)&lp->phy[k],
4991 (char *)&phy_info[j], sizeof(struct phy_table));
4992 lp->phy[k].addr = i;
4993 lp->mii_cnt++;
4994 lp->active++;
4995 } else {
4996 goto purgatory;
4997 }
4998 break;
4999 }
5000 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5001 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5002 lp->phy[k].addr = i;
5003 lp->phy[k].id = id;
5004 lp->phy[k].spd.reg = GENERIC_REG;
5005 lp->phy[k].spd.mask = GENERIC_MASK;
5006 lp->phy[k].spd.value = GENERIC_VALUE;
5007 lp->mii_cnt++;
5008 lp->active++;
5009 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5010 j = de4x5_debug;
5011 de4x5_debug |= DEBUG_MII;
5012 de4x5_dbg_mii(dev, k);
5013 de4x5_debug = j;
5014 printk("\n");
5015 }
5016 }
5017 purgatory:
5018 lp->active = 0;
5019 if (lp->phy[0].id) {
5020 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
5021 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5022 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5023
5024 de4x5_dbg_mii(dev, k);
5025 }
5026 }
5027 if (!lp->mii_cnt) lp->useMII = false;
5028
5029 return lp->mii_cnt;
5030}
5031
5032static char *
5033build_setup_frame(struct net_device *dev, int mode)
5034{
5035 struct de4x5_private *lp = netdev_priv(dev);
5036 int i;
5037 char *pa = lp->setup_frame;
5038
5039
5040 if (mode == ALL) {
5041 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5042 }
5043
5044 if (lp->setup_f == HASH_PERF) {
5045 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5046 *(pa + i) = dev->dev_addr[i];
5047 if (i & 0x01) pa += 2;
5048 }
5049 *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
5050 } else {
5051 for (i=0; i<ETH_ALEN; i++) {
5052 *(pa + (i&1)) = dev->dev_addr[i];
5053 if (i & 0x01) pa += 4;
5054 }
5055 for (i=0; i<ETH_ALEN; i++) {
5056 *(pa + (i&1)) = (char) 0xff;
5057 if (i & 0x01) pa += 4;
5058 }
5059 }
5060
5061 return pa;
5062}
5063
5064static void
5065disable_ast(struct net_device *dev)
5066{
5067 struct de4x5_private *lp = netdev_priv(dev);
5068 del_timer_sync(&lp->timer);
5069}
5070
5071static long
5072de4x5_switch_mac_port(struct net_device *dev)
5073{
5074 struct de4x5_private *lp = netdev_priv(dev);
5075 u_long iobase = dev->base_addr;
5076 s32 omr;
5077
5078 STOP_DE4X5;
5079
5080
5081 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5082 OMR_FDX));
5083 omr |= lp->infoblock_csr6;
5084 if (omr & OMR_PS) omr |= OMR_HBD;
5085 outl(omr, DE4X5_OMR);
5086
5087
5088 RESET_DE4X5;
5089
5090
5091 if (lp->chipset == DC21140) {
5092 gep_wr(lp->cache.gepc, dev);
5093 gep_wr(lp->cache.gep, dev);
5094 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5095 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5096 }
5097
5098
5099 outl(omr, DE4X5_OMR);
5100
5101
5102 inl(DE4X5_MFC);
5103
5104 return omr;
5105}
5106
5107static void
5108gep_wr(s32 data, struct net_device *dev)
5109{
5110 struct de4x5_private *lp = netdev_priv(dev);
5111 u_long iobase = dev->base_addr;
5112
5113 if (lp->chipset == DC21140) {
5114 outl(data, DE4X5_GEP);
5115 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5116 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5117 }
5118}
5119
5120static int
5121gep_rd(struct net_device *dev)
5122{
5123 struct de4x5_private *lp = netdev_priv(dev);
5124 u_long iobase = dev->base_addr;
5125
5126 if (lp->chipset == DC21140) {
5127 return inl(DE4X5_GEP);
5128 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5129 return inl(DE4X5_SIGR) & 0x000fffff;
5130 }
5131
5132 return 0;
5133}
5134
5135static void
5136yawn(struct net_device *dev, int state)
5137{
5138 struct de4x5_private *lp = netdev_priv(dev);
5139 u_long iobase = dev->base_addr;
5140
5141 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5142
5143 if(lp->bus == EISA) {
5144 switch(state) {
5145 case WAKEUP:
5146 outb(WAKEUP, PCI_CFPM);
5147 mdelay(10);
5148 break;
5149
5150 case SNOOZE:
5151 outb(SNOOZE, PCI_CFPM);
5152 break;
5153
5154 case SLEEP:
5155 outl(0, DE4X5_SICR);
5156 outb(SLEEP, PCI_CFPM);
5157 break;
5158 }
5159 } else {
5160 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5161 switch(state) {
5162 case WAKEUP:
5163 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5164 mdelay(10);
5165 break;
5166
5167 case SNOOZE:
5168 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5169 break;
5170
5171 case SLEEP:
5172 outl(0, DE4X5_SICR);
5173 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5174 break;
5175 }
5176 }
5177}
5178
5179static void
5180de4x5_parse_params(struct net_device *dev)
5181{
5182 struct de4x5_private *lp = netdev_priv(dev);
5183 char *p, *q, t;
5184
5185 lp->params.fdx = false;
5186 lp->params.autosense = AUTO;
5187
5188 if (args == NULL) return;
5189
5190 if ((p = strstr(args, dev->name))) {
5191 if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
5192 t = *q;
5193 *q = '\0';
5194
5195 if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
5196
5197 if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
5198 if (strstr(p, "TP_NW")) {
5199 lp->params.autosense = TP_NW;
5200 } else if (strstr(p, "TP")) {
5201 lp->params.autosense = TP;
5202 } else if (strstr(p, "BNC_AUI")) {
5203 lp->params.autosense = BNC;
5204 } else if (strstr(p, "BNC")) {
5205 lp->params.autosense = BNC;
5206 } else if (strstr(p, "AUI")) {
5207 lp->params.autosense = AUI;
5208 } else if (strstr(p, "10Mb")) {
5209 lp->params.autosense = _10Mb;
5210 } else if (strstr(p, "100Mb")) {
5211 lp->params.autosense = _100Mb;
5212 } else if (strstr(p, "AUTO")) {
5213 lp->params.autosense = AUTO;
5214 }
5215 }
5216 *q = t;
5217 }
5218}
5219
5220static void
5221de4x5_dbg_open(struct net_device *dev)
5222{
5223 struct de4x5_private *lp = netdev_priv(dev);
5224 int i;
5225
5226 if (de4x5_debug & DEBUG_OPEN) {
5227 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
5228 printk("\tphysical address: %pM\n", dev->dev_addr);
5229 printk("Descriptor head addresses:\n");
5230 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
5231 printk("Descriptor addresses:\nRX: ");
5232 for (i=0;i<lp->rxRingSize-1;i++){
5233 if (i < 3) {
5234 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
5235 }
5236 }
5237 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
5238 printk("TX: ");
5239 for (i=0;i<lp->txRingSize-1;i++){
5240 if (i < 3) {
5241 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
5242 }
5243 }
5244 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
5245 printk("Descriptor buffers:\nRX: ");
5246 for (i=0;i<lp->rxRingSize-1;i++){
5247 if (i < 3) {
5248 printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
5249 }
5250 }
5251 printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
5252 printk("TX: ");
5253 for (i=0;i<lp->txRingSize-1;i++){
5254 if (i < 3) {
5255 printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
5256 }
5257 }
5258 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5259 printk("Ring size:\nRX: %d\nTX: %d\n",
5260 (short)lp->rxRingSize,
5261 (short)lp->txRingSize);
5262 }
5263}
5264
5265static void
5266de4x5_dbg_mii(struct net_device *dev, int k)
5267{
5268 struct de4x5_private *lp = netdev_priv(dev);
5269 u_long iobase = dev->base_addr;
5270
5271 if (de4x5_debug & DEBUG_MII) {
5272 printk("\nMII device address: %d\n", lp->phy[k].addr);
5273 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
5274 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
5275 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
5276 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
5277 if (lp->phy[k].id != BROADCOM_T4) {
5278 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
5279 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
5280 }
5281 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
5282 if (lp->phy[k].id != BROADCOM_T4) {
5283 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
5284 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
5285 } else {
5286 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5287 }
5288 }
5289}
5290
5291static void
5292de4x5_dbg_media(struct net_device *dev)
5293{
5294 struct de4x5_private *lp = netdev_priv(dev);
5295
5296 if (lp->media != lp->c_media) {
5297 if (de4x5_debug & DEBUG_MEDIA) {
5298 printk("%s: media is %s%s\n", dev->name,
5299 (lp->media == NC ? "unconnected, link down or incompatible connection" :
5300 (lp->media == TP ? "TP" :
5301 (lp->media == ANS ? "TP/Nway" :
5302 (lp->media == BNC ? "BNC" :
5303 (lp->media == AUI ? "AUI" :
5304 (lp->media == BNC_AUI ? "BNC/AUI" :
5305 (lp->media == EXT_SIA ? "EXT SIA" :
5306 (lp->media == _100Mb ? "100Mb/s" :
5307 (lp->media == _10Mb ? "10Mb/s" :
5308 "???"
5309 ))))))))), (lp->fdx?" full duplex.":"."));
5310 }
5311 lp->c_media = lp->media;
5312 }
5313}
5314
5315static void
5316de4x5_dbg_srom(struct de4x5_srom *p)
5317{
5318 int i;
5319
5320 if (de4x5_debug & DEBUG_SROM) {
5321 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
5322 printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
5323 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
5324 printk("SROM version: %02x\n", (u_char)(p->version));
5325 printk("# controllers: %02x\n", (u_char)(p->num_controllers));
5326
5327 printk("Hardware Address: %pM\n", p->ieee_addr);
5328 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
5329 for (i=0; i<64; i++) {
5330 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
5331 }
5332 }
5333}
5334
5335static void
5336de4x5_dbg_rx(struct sk_buff *skb, int len)
5337{
5338 int i, j;
5339
5340 if (de4x5_debug & DEBUG_RX) {
5341 printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
5342 skb->data, &skb->data[6],
5343 (u_char)skb->data[12],
5344 (u_char)skb->data[13],
5345 len);
5346 for (j=0; len>0;j+=16, len-=16) {
5347 printk(" %03x: ",j);
5348 for (i=0; i<16 && i<len; i++) {
5349 printk("%02x ",(u_char)skb->data[i+j]);
5350 }
5351 printk("\n");
5352 }
5353 }
5354}
5355
5356
5357
5358
5359
5360
5361static int
5362de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
5363{
5364 struct de4x5_private *lp = netdev_priv(dev);
5365 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
5366 u_long iobase = dev->base_addr;
5367 int i, j, status = 0;
5368 s32 omr;
5369 union {
5370 u8 addr[144];
5371 u16 sval[72];
5372 u32 lval[36];
5373 } tmp;
5374 u_long flags = 0;
5375
5376 if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
5377 return -EOPNOTSUPP;
5378
5379 switch(ioc->cmd) {
5380 case DE4X5_GET_HWADDR:
5381 ioc->len = ETH_ALEN;
5382 for (i=0; i<ETH_ALEN; i++) {
5383 tmp.addr[i] = dev->dev_addr[i];
5384 }
5385 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5386 break;
5387
5388 case DE4X5_SET_HWADDR:
5389 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5390 if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
5391 if (netif_queue_stopped(dev))
5392 return -EBUSY;
5393 netif_stop_queue(dev);
5394 for (i=0; i<ETH_ALEN; i++) {
5395 dev->dev_addr[i] = tmp.addr[i];
5396 }
5397 build_setup_frame(dev, PHYS_ADDR_ONLY);
5398
5399 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
5400 SETUP_FRAME_LEN, (struct sk_buff *)1);
5401 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
5402 outl(POLL_DEMAND, DE4X5_TPD);
5403 netif_wake_queue(dev);
5404 break;
5405
5406 case DE4X5_SAY_BOO:
5407 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5408 printk("%s: Boo!\n", dev->name);
5409 break;
5410
5411 case DE4X5_MCA_EN:
5412 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5413 omr = inl(DE4X5_OMR);
5414 omr |= OMR_PM;
5415 outl(omr, DE4X5_OMR);
5416 break;
5417
5418 case DE4X5_GET_STATS:
5419 {
5420 struct pkt_stats statbuf;
5421 ioc->len = sizeof(statbuf);
5422 spin_lock_irqsave(&lp->lock, flags);
5423 memcpy(&statbuf, &lp->pktStats, ioc->len);
5424 spin_unlock_irqrestore(&lp->lock, flags);
5425 if (copy_to_user(ioc->data, &statbuf, ioc->len))
5426 return -EFAULT;
5427 break;
5428 }
5429 case DE4X5_CLR_STATS:
5430 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5431 spin_lock_irqsave(&lp->lock, flags);
5432 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
5433 spin_unlock_irqrestore(&lp->lock, flags);
5434 break;
5435
5436 case DE4X5_GET_OMR:
5437 tmp.addr[0] = inl(DE4X5_OMR);
5438 if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
5439 break;
5440
5441 case DE4X5_SET_OMR:
5442 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5443 if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
5444 outl(tmp.addr[0], DE4X5_OMR);
5445 break;
5446
5447 case DE4X5_GET_REG:
5448 j = 0;
5449 tmp.lval[0] = inl(DE4X5_STS); j+=4;
5450 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
5451 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
5452 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
5453 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
5454 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
5455 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
5456 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
5457 ioc->len = j;
5458 if (copy_to_user(ioc->data, tmp.lval, ioc->len))
5459 return -EFAULT;
5460 break;
5461
5462#define DE4X5_DUMP 0x0f
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552 default:
5553 return -EOPNOTSUPP;
5554 }
5555
5556 return status;
5557}
5558
5559static int __init de4x5_module_init (void)
5560{
5561 int err = 0;
5562
5563#ifdef CONFIG_PCI
5564 err = pci_register_driver(&de4x5_pci_driver);
5565#endif
5566#ifdef CONFIG_EISA
5567 err |= eisa_driver_register (&de4x5_eisa_driver);
5568#endif
5569
5570 return err;
5571}
5572
5573static void __exit de4x5_module_exit (void)
5574{
5575#ifdef CONFIG_PCI
5576 pci_unregister_driver (&de4x5_pci_driver);
5577#endif
5578#ifdef CONFIG_EISA
5579 eisa_driver_unregister (&de4x5_eisa_driver);
5580#endif
5581}
5582
5583module_init (de4x5_module_init);
5584module_exit (de4x5_module_exit);
5585