1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446#include <linux/module.h>
447#include <linux/kernel.h>
448#include <linux/string.h>
449#include <linux/interrupt.h>
450#include <linux/ptrace.h>
451#include <linux/errno.h>
452#include <linux/ioport.h>
453#include <linux/pci.h>
454#include <linux/eisa.h>
455#include <linux/delay.h>
456#include <linux/init.h>
457#include <linux/spinlock.h>
458#include <linux/crc32.h>
459#include <linux/netdevice.h>
460#include <linux/etherdevice.h>
461#include <linux/skbuff.h>
462#include <linux/time.h>
463#include <linux/types.h>
464#include <linux/unistd.h>
465#include <linux/ctype.h>
466#include <linux/dma-mapping.h>
467#include <linux/moduleparam.h>
468#include <linux/bitops.h>
469#include <linux/gfp.h>
470
471#include <asm/io.h>
472#include <asm/dma.h>
473#include <asm/byteorder.h>
474#include <asm/unaligned.h>
475#include <asm/uaccess.h>
476#ifdef CONFIG_PPC_PMAC
477#include <asm/machdep.h>
478#endif
479
480#include "de4x5.h"
481
482static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485#define c_char const char
486
487
488
489
490struct phy_table {
491 int reset;
492 int id;
493 int ta;
494 struct {
495 int reg;
496 int mask;
497 int value;
498 } spd;
499};
500
501struct mii_phy {
502 int reset;
503 int id;
504 int ta;
505 struct {
506 int reg;
507 int mask;
508 int value;
509 } spd;
510 int addr;
511 u_char *gep;
512 u_char *rst;
513 u_int mc;
514 u_int ana;
515 u_int fdx;
516 u_int ttm;
517 u_int mci;
518};
519
520#define DE4X5_MAX_PHY 8
521
522struct sia_phy {
523 u_char mc;
524 u_char ext;
525 int csr13;
526 int csr14;
527 int csr15;
528 int gepc;
529 int gep;
530};
531
532
533
534
535
536static struct phy_table phy_info[] = {
537 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
538 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
539 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
540 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}},
541 {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}}
542};
543
544
545
546
547
548
549#define GENERIC_REG 0x05
550#define GENERIC_MASK MII_ANLPA_100M
551#define GENERIC_VALUE MII_ANLPA_100M
552
553
554
555
556static c_char enet_det[][ETH_ALEN] = {
557 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
558 {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
559};
560
561#define SMC 1
562#define ACCTON 2
563
564
565
566
567
568
569static c_char srom_repair_info[][100] = {
570 {0x00,0x1e,0x00,0x00,0x00,0x08,
571 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
572 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
573 0x00,0x18,}
574};
575
576
577#ifdef DE4X5_DEBUG
578static int de4x5_debug = DE4X5_DEBUG;
579#else
580
581static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582#endif
583
584
585
586
587
588
589
590
591
592
593#ifdef DE4X5_PARM
594static char *args = DE4X5_PARM;
595#else
596static char *args;
597#endif
598
599struct parameters {
600 bool fdx;
601 int autosense;
602};
603
604#define DE4X5_AUTOSENSE_MS 250
605
606#define DE4X5_NDA 0xffe0
607
608
609
610
611#define PROBE_LENGTH 32
612#define ETH_PROM_SIG 0xAA5500FFUL
613
614
615
616
617#define PKT_BUF_SZ 1536
618#define IEEE802_3_SZ 1518
619#define MAX_PKT_SZ 1514
620#define MAX_DAT_SZ 1500
621#define MIN_DAT_SZ 1
622#define PKT_HDR_LEN 14
623#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624#define QUEUE_PKT_TIMEOUT (3*HZ)
625
626
627
628
629
630#define DE4X5_EISA_IO_PORTS 0x0c00
631#define DE4X5_EISA_TOTAL_SIZE 0x100
632
633#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636#define DE4X5_NAME_LENGTH 8
637
638static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
639
640
641
642
643#define PROBE_LENGTH 32
644#define ETH_PROM_SIG 0xAA5500FFUL
645
646
647
648
649#define PCI_MAX_BUS_NUM 8
650#define DE4X5_PCI_TOTAL_SIZE 0x80
651#define DE4X5_CLASS_CODE 0x00020000
652
653
654
655
656
657
658
659#define DE4X5_ALIGN4 ((u_long)4 - 1)
660#define DE4X5_ALIGN8 ((u_long)8 - 1)
661#define DE4X5_ALIGN16 ((u_long)16 - 1)
662#define DE4X5_ALIGN32 ((u_long)32 - 1)
663#define DE4X5_ALIGN64 ((u_long)64 - 1)
664#define DE4X5_ALIGN128 ((u_long)128 - 1)
665
666#define DE4X5_ALIGN DE4X5_ALIGN32
667#define DE4X5_CACHE_ALIGN CAL_16LONG
668#define DESC_SKIP_LEN DSL_0
669
670#define DESC_ALIGN
671
672#ifndef DEC_ONLY
673static int dec_only;
674#else
675static int dec_only = 1;
676#endif
677
678
679
680
681#define ENABLE_IRQs { \
682 imr |= lp->irq_en;\
683 outl(imr, DE4X5_IMR); \
684}
685
686#define DISABLE_IRQs {\
687 imr = inl(DE4X5_IMR);\
688 imr &= ~lp->irq_en;\
689 outl(imr, DE4X5_IMR); \
690}
691
692#define UNMASK_IRQs {\
693 imr |= lp->irq_mask;\
694 outl(imr, DE4X5_IMR); \
695}
696
697#define MASK_IRQs {\
698 imr = inl(DE4X5_IMR);\
699 imr &= ~lp->irq_mask;\
700 outl(imr, DE4X5_IMR); \
701}
702
703
704
705
706#define START_DE4X5 {\
707 omr = inl(DE4X5_OMR);\
708 omr |= OMR_ST | OMR_SR;\
709 outl(omr, DE4X5_OMR); \
710}
711
712#define STOP_DE4X5 {\
713 omr = inl(DE4X5_OMR);\
714 omr &= ~(OMR_ST|OMR_SR);\
715 outl(omr, DE4X5_OMR); \
716}
717
718
719
720
721#define RESET_SIA outl(0, DE4X5_SICR);
722
723
724
725
726#define DE4X5_AUTOSENSE_MS 250
727
728
729
730
731struct de4x5_srom {
732 char sub_vendor_id[2];
733 char sub_system_id[2];
734 char reserved[12];
735 char id_block_crc;
736 char reserved2;
737 char version;
738 char num_controllers;
739 char ieee_addr[6];
740 char info[100];
741 short chksum;
742};
743#define SUB_VENDOR_ID 0x500a
744
745
746
747
748
749
750
751
752
753#define NUM_RX_DESC 8
754#define NUM_TX_DESC 32
755#define RX_BUFF_SZ 1536
756
757
758struct de4x5_desc {
759 volatile __le32 status;
760 __le32 des1;
761 __le32 buf;
762 __le32 next;
763 DESC_ALIGN
764};
765
766
767
768
769#define DE4X5_PKT_STAT_SZ 16
770#define DE4X5_PKT_BIN_SZ 128
771
772
773struct pkt_stats {
774 u_int bins[DE4X5_PKT_STAT_SZ];
775 u_int unicast;
776 u_int multicast;
777 u_int broadcast;
778 u_int excessive_collisions;
779 u_int tx_underruns;
780 u_int excessive_underruns;
781 u_int rx_runt_frames;
782 u_int rx_collision;
783 u_int rx_dribble;
784 u_int rx_overflow;
785};
786
787struct de4x5_private {
788 char adapter_name[80];
789 u_long interrupt;
790 struct de4x5_desc *rx_ring;
791 struct de4x5_desc *tx_ring;
792 struct sk_buff *tx_skb[NUM_TX_DESC];
793 struct sk_buff *rx_skb[NUM_RX_DESC];
794 int rx_new, rx_old;
795 int tx_new, tx_old;
796 char setup_frame[SETUP_FRAME_LEN];
797 char frame[64];
798 spinlock_t lock;
799 struct net_device_stats stats;
800 struct pkt_stats pktStats;
801 char rxRingSize;
802 char txRingSize;
803 int bus;
804 int bus_num;
805 int device;
806 int state;
807 int chipset;
808 s32 irq_mask;
809 s32 irq_en;
810 int media;
811 int c_media;
812 bool fdx;
813 int linkOK;
814 int autosense;
815 bool tx_enable;
816 int setup_f;
817 int local_state;
818 struct mii_phy phy[DE4X5_MAX_PHY];
819 struct sia_phy sia;
820 int active;
821 int mii_cnt;
822 int timeout;
823 struct timer_list timer;
824 int tmp;
825 struct {
826 u_long lock;
827 s32 csr0;
828 s32 csr6;
829 s32 csr7;
830 s32 gep;
831 s32 gepc;
832 s32 csr13;
833 s32 csr14;
834 s32 csr15;
835 int save_cnt;
836 struct sk_buff_head queue;
837 } cache;
838 struct de4x5_srom srom;
839 int cfrv;
840 int rx_ovf;
841 bool useSROM;
842 bool useMII;
843 int asBitValid;
844 int asPolarity;
845 int asBit;
846 int defMedium;
847 int tcount;
848 int infoblock_init;
849 int infoleaf_offset;
850 s32 infoblock_csr6;
851 int infoblock_media;
852 int (*infoleaf_fn)(struct net_device *);
853 u_char *rst;
854 u_char ibn;
855 struct parameters params;
856 struct device *gendev;
857 dma_addr_t dma_rings;
858 int dma_size;
859 char *rx_bufs;
860};
861
862
863
864
865
866
867
868
869
870
871
872
873
874static struct {
875 int chipset;
876 int bus;
877 int irq;
878 u_char addr[ETH_ALEN];
879} last = {0,};
880
881
882
883
884
885
886
887
888#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
889 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
890 lp->tx_old -lp->tx_new-1)
891
892#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
893
894
895
896
897static int de4x5_open(struct net_device *dev);
898static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901static int de4x5_close(struct net_device *dev);
902static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904static void set_multicast_list(struct net_device *dev);
905static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907
908
909
910static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911static int de4x5_init(struct net_device *dev);
912static int de4x5_sw_reset(struct net_device *dev);
913static int de4x5_rx(struct net_device *dev);
914static int de4x5_tx(struct net_device *dev);
915static void de4x5_ast(struct net_device *dev);
916static int de4x5_txur(struct net_device *dev);
917static int de4x5_rx_ovfc(struct net_device *dev);
918
919static int autoconf_media(struct net_device *dev);
920static void create_packet(struct net_device *dev, char *frame, int len);
921static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922static int dc21040_autoconf(struct net_device *dev);
923static int dc21041_autoconf(struct net_device *dev);
924static int dc21140m_autoconf(struct net_device *dev);
925static int dc2114x_autoconf(struct net_device *dev);
926static int srom_autoconf(struct net_device *dev);
927static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930static int test_for_100Mb(struct net_device *dev, int msec);
931static int wait_for_link(struct net_device *dev);
932static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933static int is_spd_100(struct net_device *dev);
934static int is_100_up(struct net_device *dev);
935static int is_10_up(struct net_device *dev);
936static int is_anc_capable(struct net_device *dev);
937static int ping_media(struct net_device *dev, int msec);
938static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939static void de4x5_free_rx_buffs(struct net_device *dev);
940static void de4x5_free_tx_buffs(struct net_device *dev);
941static void de4x5_save_skbs(struct net_device *dev);
942static void de4x5_rst_desc_ring(struct net_device *dev);
943static void de4x5_cache_state(struct net_device *dev, int flag);
944static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947static void de4x5_setup_intr(struct net_device *dev);
948static void de4x5_init_connection(struct net_device *dev);
949static int de4x5_reset_phy(struct net_device *dev);
950static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952static int test_tp(struct net_device *dev, s32 msec);
953static int EISA_signature(char *name, struct device *device);
954static int PCI_signature(char *name, struct de4x5_private *lp);
955static void DevicePresent(struct net_device *dev, u_long iobase);
956static void enet_addr_rst(u_long aprom_addr);
957static int de4x5_bad_srom(struct de4x5_private *lp);
958static short srom_rd(u_long address, u_char offset);
959static void srom_latch(u_int command, u_long address);
960static void srom_command(u_int command, u_long address);
961static void srom_address(u_int command, u_long address, u_char offset);
962static short srom_data(u_int command, u_long address);
963
964static void sendto_srom(u_int command, u_long addr);
965static int getfrom_srom(u_long addr);
966static int srom_map_media(struct net_device *dev);
967static int srom_infoleaf_info(struct net_device *dev);
968static void srom_init(struct net_device *dev);
969static void srom_exec(struct net_device *dev, u_char *p);
970static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972static int mii_rdata(u_long ioaddr);
973static void mii_wdata(int data, int len, u_long ioaddr);
974static void mii_ta(u_long rw, u_long ioaddr);
975static int mii_swap(int data, int len);
976static void mii_address(u_char addr, u_long ioaddr);
977static void sendto_mii(u32 command, int data, u_long ioaddr);
978static int getfrom_mii(u32 command, u_long ioaddr);
979static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980static int mii_get_phy(struct net_device *dev);
981static void SetMulticastFilter(struct net_device *dev);
982static int get_hw_addr(struct net_device *dev);
983static void srom_repair(struct net_device *dev, int card);
984static int test_bad_enet(struct net_device *dev, int status);
985static int an_exception(struct de4x5_private *lp);
986static char *build_setup_frame(struct net_device *dev, int mode);
987static void disable_ast(struct net_device *dev);
988static long de4x5_switch_mac_port(struct net_device *dev);
989static int gep_rd(struct net_device *dev);
990static void gep_wr(s32 data, struct net_device *dev);
991static void yawn(struct net_device *dev, int state);
992static void de4x5_parse_params(struct net_device *dev);
993static void de4x5_dbg_open(struct net_device *dev);
994static void de4x5_dbg_mii(struct net_device *dev, int k);
995static void de4x5_dbg_media(struct net_device *dev);
996static void de4x5_dbg_srom(struct de4x5_srom *p);
997static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998static int dc21041_infoleaf(struct net_device *dev);
999static int dc21140_infoleaf(struct net_device *dev);
1000static int dc21142_infoleaf(struct net_device *dev);
1001static int dc21143_infoleaf(struct net_device *dev);
1002static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1003static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1004static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1005static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1006static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1007static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1008static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1009
1010
1011
1012
1013
1014
1015
1016static int io=0x0;
1017
1018module_param(io, int, 0);
1019module_param(de4x5_debug, int, 0);
1020module_param(dec_only, int, 0);
1021module_param(args, charp, 0);
1022
1023MODULE_PARM_DESC(io, "de4x5 I/O base address");
1024MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1025MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1026MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1027MODULE_LICENSE("GPL");
1028
1029
1030
1031
1032struct InfoLeaf {
1033 int chipset;
1034 int (*fn)(struct net_device *);
1035};
1036static struct InfoLeaf infoleaf_array[] = {
1037 {DC21041, dc21041_infoleaf},
1038 {DC21140, dc21140_infoleaf},
1039 {DC21142, dc21142_infoleaf},
1040 {DC21143, dc21143_infoleaf}
1041};
1042#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1043
1044
1045
1046
1047static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1048 type0_infoblock,
1049 type1_infoblock,
1050 type2_infoblock,
1051 type3_infoblock,
1052 type4_infoblock,
1053 type5_infoblock,
1054 compact_infoblock
1055};
1056
1057#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1058
1059
1060
1061
1062#define RESET_DE4X5 {\
1063 int i;\
1064 i=inl(DE4X5_BMR);\
1065 mdelay(1);\
1066 outl(i | BMR_SWR, DE4X5_BMR);\
1067 mdelay(1);\
1068 outl(i, DE4X5_BMR);\
1069 mdelay(1);\
1070 for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
1071 mdelay(1);\
1072}
1073
1074#define PHY_HARD_RESET {\
1075 outl(GEP_HRST, DE4X5_GEP); \
1076 mdelay(1); \
1077 outl(0x00, DE4X5_GEP);\
1078 mdelay(2); \
1079}
1080
1081static const struct net_device_ops de4x5_netdev_ops = {
1082 .ndo_open = de4x5_open,
1083 .ndo_stop = de4x5_close,
1084 .ndo_start_xmit = de4x5_queue_pkt,
1085 .ndo_get_stats = de4x5_get_stats,
1086 .ndo_set_rx_mode = set_multicast_list,
1087 .ndo_do_ioctl = de4x5_ioctl,
1088 .ndo_change_mtu = eth_change_mtu,
1089 .ndo_set_mac_address= eth_mac_addr,
1090 .ndo_validate_addr = eth_validate_addr,
1091};
1092
1093
1094static int
1095de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1096{
1097 char name[DE4X5_NAME_LENGTH + 1];
1098 struct de4x5_private *lp = netdev_priv(dev);
1099 struct pci_dev *pdev = NULL;
1100 int i, status=0;
1101
1102 dev_set_drvdata(gendev, dev);
1103
1104
1105 if (lp->bus == EISA) {
1106 outb(WAKEUP, PCI_CFPM);
1107 } else {
1108 pdev = to_pci_dev (gendev);
1109 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1110 }
1111 mdelay(10);
1112
1113 RESET_DE4X5;
1114
1115 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1116 return -ENXIO;
1117 }
1118
1119
1120
1121
1122 lp->useSROM = false;
1123 if (lp->bus == PCI) {
1124 PCI_signature(name, lp);
1125 } else {
1126 EISA_signature(name, gendev);
1127 }
1128
1129 if (*name == '\0') {
1130 return -ENXIO;
1131 }
1132
1133 dev->base_addr = iobase;
1134 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1135
1136 status = get_hw_addr(dev);
1137 printk(", h/w address %pM\n", dev->dev_addr);
1138
1139 if (status != 0) {
1140 printk(" which has an Ethernet PROM CRC error.\n");
1141 return -ENXIO;
1142 } else {
1143 skb_queue_head_init(&lp->cache.queue);
1144 lp->cache.gepc = GEP_INIT;
1145 lp->asBit = GEP_SLNK;
1146 lp->asPolarity = GEP_SLNK;
1147 lp->asBitValid = ~0;
1148 lp->timeout = -1;
1149 lp->gendev = gendev;
1150 spin_lock_init(&lp->lock);
1151 init_timer(&lp->timer);
1152 lp->timer.function = (void (*)(unsigned long))de4x5_ast;
1153 lp->timer.data = (unsigned long)dev;
1154 de4x5_parse_params(dev);
1155
1156
1157
1158
1159 lp->autosense = lp->params.autosense;
1160 if (lp->chipset != DC21140) {
1161 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1162 lp->params.autosense = TP;
1163 }
1164 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1165 lp->params.autosense = BNC;
1166 }
1167 }
1168 lp->fdx = lp->params.fdx;
1169 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1170
1171 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1172#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1173 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1174#endif
1175 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1176 &lp->dma_rings, GFP_ATOMIC);
1177 if (lp->rx_ring == NULL) {
1178 return -ENOMEM;
1179 }
1180
1181 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1182
1183
1184
1185
1186
1187#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1188 for (i=0; i<NUM_RX_DESC; i++) {
1189 lp->rx_ring[i].status = 0;
1190 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1191 lp->rx_ring[i].buf = 0;
1192 lp->rx_ring[i].next = 0;
1193 lp->rx_skb[i] = (struct sk_buff *) 1;
1194 }
1195
1196#else
1197 {
1198 dma_addr_t dma_rx_bufs;
1199
1200 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1201 * sizeof(struct de4x5_desc);
1202 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1203 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1204 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1205 for (i=0; i<NUM_RX_DESC; i++) {
1206 lp->rx_ring[i].status = 0;
1207 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1208 lp->rx_ring[i].buf =
1209 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1210 lp->rx_ring[i].next = 0;
1211 lp->rx_skb[i] = (struct sk_buff *) 1;
1212 }
1213
1214 }
1215#endif
1216
1217 barrier();
1218
1219 lp->rxRingSize = NUM_RX_DESC;
1220 lp->txRingSize = NUM_TX_DESC;
1221
1222
1223 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1224 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1225
1226
1227 outl(lp->dma_rings, DE4X5_RRBA);
1228 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1229 DE4X5_TRBA);
1230
1231
1232 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1233 lp->irq_en = IMR_NIM | IMR_AIM;
1234
1235
1236 create_packet(dev, lp->frame, sizeof(lp->frame));
1237
1238
1239 i = lp->cfrv & 0x000000fe;
1240 if ((lp->chipset == DC21140) && (i == 0x20)) {
1241 lp->rx_ovf = 1;
1242 }
1243
1244
1245 if (lp->useSROM) {
1246 lp->state = INITIALISED;
1247 if (srom_infoleaf_info(dev)) {
1248 dma_free_coherent (gendev, lp->dma_size,
1249 lp->rx_ring, lp->dma_rings);
1250 return -ENXIO;
1251 }
1252 srom_init(dev);
1253 }
1254
1255 lp->state = CLOSED;
1256
1257
1258
1259
1260 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1261 mii_get_phy(dev);
1262 }
1263
1264 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1265 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1266 }
1267
1268 if (de4x5_debug & DEBUG_VERSION) {
1269 printk(version);
1270 }
1271
1272
1273 SET_NETDEV_DEV(dev, gendev);
1274 dev->netdev_ops = &de4x5_netdev_ops;
1275 dev->mem_start = 0;
1276
1277
1278 if ((status = register_netdev (dev))) {
1279 dma_free_coherent (gendev, lp->dma_size,
1280 lp->rx_ring, lp->dma_rings);
1281 return status;
1282 }
1283
1284
1285 yawn(dev, SLEEP);
1286
1287 return status;
1288}
1289
1290
1291static int
1292de4x5_open(struct net_device *dev)
1293{
1294 struct de4x5_private *lp = netdev_priv(dev);
1295 u_long iobase = dev->base_addr;
1296 int i, status = 0;
1297 s32 omr;
1298
1299
1300 for (i=0; i<lp->rxRingSize; i++) {
1301 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1302 de4x5_free_rx_buffs(dev);
1303 return -EAGAIN;
1304 }
1305 }
1306
1307
1308
1309
1310 yawn(dev, WAKEUP);
1311
1312
1313
1314
1315 status = de4x5_init(dev);
1316 spin_lock_init(&lp->lock);
1317 lp->state = OPEN;
1318 de4x5_dbg_open(dev);
1319
1320 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1321 lp->adapter_name, dev)) {
1322 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1323 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1324 lp->adapter_name, dev)) {
1325 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1326 disable_ast(dev);
1327 de4x5_free_rx_buffs(dev);
1328 de4x5_free_tx_buffs(dev);
1329 yawn(dev, SLEEP);
1330 lp->state = CLOSED;
1331 return -EAGAIN;
1332 } else {
1333 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1334 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1335 }
1336 }
1337
1338 lp->interrupt = UNMASK_INTERRUPTS;
1339 dev->trans_start = jiffies;
1340
1341 START_DE4X5;
1342
1343 de4x5_setup_intr(dev);
1344
1345 if (de4x5_debug & DEBUG_OPEN) {
1346 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1347 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1348 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1349 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1350 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1351 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1352 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1353 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1354 }
1355
1356 return status;
1357}
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367static int
1368de4x5_init(struct net_device *dev)
1369{
1370
1371 netif_stop_queue(dev);
1372
1373 de4x5_sw_reset(dev);
1374
1375
1376 autoconf_media(dev);
1377
1378 return 0;
1379}
1380
1381static int
1382de4x5_sw_reset(struct net_device *dev)
1383{
1384 struct de4x5_private *lp = netdev_priv(dev);
1385 u_long iobase = dev->base_addr;
1386 int i, j, status = 0;
1387 s32 bmr, omr;
1388
1389
1390 if (!lp->useSROM) {
1391 if (lp->phy[lp->active].id != 0) {
1392 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1393 } else {
1394 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1395 }
1396 de4x5_switch_mac_port(dev);
1397 }
1398
1399
1400
1401
1402
1403
1404 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1405 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1406 outl(bmr, DE4X5_BMR);
1407
1408 omr = inl(DE4X5_OMR) & ~OMR_PR;
1409 if (lp->chipset == DC21140) {
1410 omr |= (OMR_SDP | OMR_SB);
1411 }
1412 lp->setup_f = PERFECT;
1413 outl(lp->dma_rings, DE4X5_RRBA);
1414 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1415 DE4X5_TRBA);
1416
1417 lp->rx_new = lp->rx_old = 0;
1418 lp->tx_new = lp->tx_old = 0;
1419
1420 for (i = 0; i < lp->rxRingSize; i++) {
1421 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1422 }
1423
1424 for (i = 0; i < lp->txRingSize; i++) {
1425 lp->tx_ring[i].status = cpu_to_le32(0);
1426 }
1427
1428 barrier();
1429
1430
1431 SetMulticastFilter(dev);
1432
1433 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1434 outl(omr|OMR_ST, DE4X5_OMR);
1435
1436
1437
1438 for (j=0, i=0;(i<500) && (j==0);i++) {
1439 mdelay(1);
1440 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1441 }
1442 outl(omr, DE4X5_OMR);
1443
1444 if (j == 0) {
1445 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1446 inl(DE4X5_STS));
1447 status = -EIO;
1448 }
1449
1450 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1451 lp->tx_old = lp->tx_new;
1452
1453 return status;
1454}
1455
1456
1457
1458
1459static netdev_tx_t
1460de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1461{
1462 struct de4x5_private *lp = netdev_priv(dev);
1463 u_long iobase = dev->base_addr;
1464 u_long flags = 0;
1465
1466 netif_stop_queue(dev);
1467 if (!lp->tx_enable)
1468 return NETDEV_TX_LOCKED;
1469
1470
1471
1472
1473
1474
1475 spin_lock_irqsave(&lp->lock, flags);
1476 de4x5_tx(dev);
1477 spin_unlock_irqrestore(&lp->lock, flags);
1478
1479
1480 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1481 return NETDEV_TX_LOCKED;
1482
1483
1484 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1485 if (lp->interrupt) {
1486 de4x5_putb_cache(dev, skb);
1487 } else {
1488 de4x5_put_cache(dev, skb);
1489 }
1490 if (de4x5_debug & DEBUG_TX) {
1491 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1492 }
1493 } else if (skb->len > 0) {
1494
1495 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1496 de4x5_put_cache(dev, skb);
1497 skb = de4x5_get_cache(dev);
1498 }
1499
1500 while (skb && !netif_queue_stopped(dev) &&
1501 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1502 spin_lock_irqsave(&lp->lock, flags);
1503 netif_stop_queue(dev);
1504 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1505 lp->stats.tx_bytes += skb->len;
1506 outl(POLL_DEMAND, DE4X5_TPD);
1507
1508 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1509
1510 if (TX_BUFFS_AVAIL) {
1511 netif_start_queue(dev);
1512 }
1513 skb = de4x5_get_cache(dev);
1514 spin_unlock_irqrestore(&lp->lock, flags);
1515 }
1516 if (skb) de4x5_putb_cache(dev, skb);
1517 }
1518
1519 lp->cache.lock = 0;
1520
1521 return NETDEV_TX_OK;
1522}
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535static irqreturn_t
1536de4x5_interrupt(int irq, void *dev_id)
1537{
1538 struct net_device *dev = dev_id;
1539 struct de4x5_private *lp;
1540 s32 imr, omr, sts, limit;
1541 u_long iobase;
1542 unsigned int handled = 0;
1543
1544 lp = netdev_priv(dev);
1545 spin_lock(&lp->lock);
1546 iobase = dev->base_addr;
1547
1548 DISABLE_IRQs;
1549
1550 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1551 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1552
1553 synchronize_irq(dev->irq);
1554
1555 for (limit=0; limit<8; limit++) {
1556 sts = inl(DE4X5_STS);
1557 outl(sts, DE4X5_STS);
1558
1559 if (!(sts & lp->irq_mask)) break;
1560 handled = 1;
1561
1562 if (sts & (STS_RI | STS_RU))
1563 de4x5_rx(dev);
1564
1565 if (sts & (STS_TI | STS_TU))
1566 de4x5_tx(dev);
1567
1568 if (sts & STS_LNF) {
1569 lp->irq_mask &= ~IMR_LFM;
1570 }
1571
1572 if (sts & STS_UNF) {
1573 de4x5_txur(dev);
1574 }
1575
1576 if (sts & STS_SE) {
1577 STOP_DE4X5;
1578 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1579 dev->name, sts);
1580 spin_unlock(&lp->lock);
1581 return IRQ_HANDLED;
1582 }
1583 }
1584
1585
1586 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1587 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1588 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1589 }
1590 lp->cache.lock = 0;
1591 }
1592
1593 lp->interrupt = UNMASK_INTERRUPTS;
1594 ENABLE_IRQs;
1595 spin_unlock(&lp->lock);
1596
1597 return IRQ_RETVAL(handled);
1598}
1599
1600static int
1601de4x5_rx(struct net_device *dev)
1602{
1603 struct de4x5_private *lp = netdev_priv(dev);
1604 u_long iobase = dev->base_addr;
1605 int entry;
1606 s32 status;
1607
1608 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1609 entry=lp->rx_new) {
1610 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1611
1612 if (lp->rx_ovf) {
1613 if (inl(DE4X5_MFC) & MFC_FOCM) {
1614 de4x5_rx_ovfc(dev);
1615 break;
1616 }
1617 }
1618
1619 if (status & RD_FS) {
1620 lp->rx_old = entry;
1621 }
1622
1623 if (status & RD_LS) {
1624 if (lp->tx_enable) lp->linkOK++;
1625 if (status & RD_ES) {
1626 lp->stats.rx_errors++;
1627 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1628 if (status & RD_CE) lp->stats.rx_crc_errors++;
1629 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1630 if (status & RD_TL) lp->stats.rx_length_errors++;
1631 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1632 if (status & RD_CS) lp->pktStats.rx_collision++;
1633 if (status & RD_DB) lp->pktStats.rx_dribble++;
1634 if (status & RD_OF) lp->pktStats.rx_overflow++;
1635 } else {
1636 struct sk_buff *skb;
1637 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1638 >> 16) - 4;
1639
1640 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1641 printk("%s: Insufficient memory; nuking packet.\n",
1642 dev->name);
1643 lp->stats.rx_dropped++;
1644 } else {
1645 de4x5_dbg_rx(skb, pkt_len);
1646
1647
1648 skb->protocol=eth_type_trans(skb,dev);
1649 de4x5_local_stats(dev, skb->data, pkt_len);
1650 netif_rx(skb);
1651
1652
1653 lp->stats.rx_packets++;
1654 lp->stats.rx_bytes += pkt_len;
1655 }
1656 }
1657
1658
1659 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1660 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1661 barrier();
1662 }
1663 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1664 barrier();
1665 }
1666
1667
1668
1669
1670 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1671 }
1672
1673 return 0;
1674}
1675
1676static inline void
1677de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1678{
1679 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1680 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1681 DMA_TO_DEVICE);
1682 if ((u_long) lp->tx_skb[entry] > 1)
1683 dev_kfree_skb_irq(lp->tx_skb[entry]);
1684 lp->tx_skb[entry] = NULL;
1685}
1686
1687
1688
1689
1690static int
1691de4x5_tx(struct net_device *dev)
1692{
1693 struct de4x5_private *lp = netdev_priv(dev);
1694 u_long iobase = dev->base_addr;
1695 int entry;
1696 s32 status;
1697
1698 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1699 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1700 if (status < 0) {
1701 break;
1702 } else if (status != 0x7fffffff) {
1703 if (status & TD_ES) {
1704 lp->stats.tx_errors++;
1705 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1706 if (status & TD_LC) lp->stats.tx_window_errors++;
1707 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1708 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1709 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1710
1711 if (TX_PKT_PENDING) {
1712 outl(POLL_DEMAND, DE4X5_TPD);
1713 }
1714 } else {
1715 lp->stats.tx_packets++;
1716 if (lp->tx_enable) lp->linkOK++;
1717 }
1718
1719 lp->stats.collisions += ((status & TD_EC) ? 16 :
1720 ((status & TD_CC) >> 3));
1721
1722
1723 if (lp->tx_skb[entry] != NULL)
1724 de4x5_free_tx_buff(lp, entry);
1725 }
1726
1727
1728 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1729 }
1730
1731
1732 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1733 if (lp->interrupt)
1734 netif_wake_queue(dev);
1735 else
1736 netif_start_queue(dev);
1737 }
1738
1739 return 0;
1740}
1741
1742static void
1743de4x5_ast(struct net_device *dev)
1744{
1745 struct de4x5_private *lp = netdev_priv(dev);
1746 int next_tick = DE4X5_AUTOSENSE_MS;
1747 int dt;
1748
1749 if (lp->useSROM)
1750 next_tick = srom_autoconf(dev);
1751 else if (lp->chipset == DC21140)
1752 next_tick = dc21140m_autoconf(dev);
1753 else if (lp->chipset == DC21041)
1754 next_tick = dc21041_autoconf(dev);
1755 else if (lp->chipset == DC21040)
1756 next_tick = dc21040_autoconf(dev);
1757 lp->linkOK = 0;
1758
1759 dt = (next_tick * HZ) / 1000;
1760
1761 if (!dt)
1762 dt = 1;
1763
1764 mod_timer(&lp->timer, jiffies + dt);
1765}
1766
1767static int
1768de4x5_txur(struct net_device *dev)
1769{
1770 struct de4x5_private *lp = netdev_priv(dev);
1771 u_long iobase = dev->base_addr;
1772 int omr;
1773
1774 omr = inl(DE4X5_OMR);
1775 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1776 omr &= ~(OMR_ST|OMR_SR);
1777 outl(omr, DE4X5_OMR);
1778 while (inl(DE4X5_STS) & STS_TS);
1779 if ((omr & OMR_TR) < OMR_TR) {
1780 omr += 0x4000;
1781 } else {
1782 omr |= OMR_SF;
1783 }
1784 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1785 }
1786
1787 return 0;
1788}
1789
1790static int
1791de4x5_rx_ovfc(struct net_device *dev)
1792{
1793 struct de4x5_private *lp = netdev_priv(dev);
1794 u_long iobase = dev->base_addr;
1795 int omr;
1796
1797 omr = inl(DE4X5_OMR);
1798 outl(omr & ~OMR_SR, DE4X5_OMR);
1799 while (inl(DE4X5_STS) & STS_RS);
1800
1801 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1802 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1803 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1804 }
1805
1806 outl(omr, DE4X5_OMR);
1807
1808 return 0;
1809}
1810
1811static int
1812de4x5_close(struct net_device *dev)
1813{
1814 struct de4x5_private *lp = netdev_priv(dev);
1815 u_long iobase = dev->base_addr;
1816 s32 imr, omr;
1817
1818 disable_ast(dev);
1819
1820 netif_stop_queue(dev);
1821
1822 if (de4x5_debug & DEBUG_CLOSE) {
1823 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1824 dev->name, inl(DE4X5_STS));
1825 }
1826
1827
1828
1829
1830 DISABLE_IRQs;
1831 STOP_DE4X5;
1832
1833
1834 free_irq(dev->irq, dev);
1835 lp->state = CLOSED;
1836
1837
1838 de4x5_free_rx_buffs(dev);
1839 de4x5_free_tx_buffs(dev);
1840
1841
1842 yawn(dev, SLEEP);
1843
1844 return 0;
1845}
1846
1847static struct net_device_stats *
1848de4x5_get_stats(struct net_device *dev)
1849{
1850 struct de4x5_private *lp = netdev_priv(dev);
1851 u_long iobase = dev->base_addr;
1852
1853 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1854
1855 return &lp->stats;
1856}
1857
1858static void
1859de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1860{
1861 struct de4x5_private *lp = netdev_priv(dev);
1862 int i;
1863
1864 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1865 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1866 lp->pktStats.bins[i]++;
1867 i = DE4X5_PKT_STAT_SZ;
1868 }
1869 }
1870 if (is_multicast_ether_addr(buf)) {
1871 if (is_broadcast_ether_addr(buf)) {
1872 lp->pktStats.broadcast++;
1873 } else {
1874 lp->pktStats.multicast++;
1875 }
1876 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1877 lp->pktStats.unicast++;
1878 }
1879
1880 lp->pktStats.bins[0]++;
1881 if (lp->pktStats.bins[0] == 0) {
1882 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1883 }
1884}
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894static void
1895load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1896{
1897 struct de4x5_private *lp = netdev_priv(dev);
1898 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1899 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1900
1901 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1902 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1903 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1904 lp->tx_skb[lp->tx_new] = skb;
1905 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1906 barrier();
1907
1908 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1909 barrier();
1910}
1911
1912
1913
1914
1915static void
1916set_multicast_list(struct net_device *dev)
1917{
1918 struct de4x5_private *lp = netdev_priv(dev);
1919 u_long iobase = dev->base_addr;
1920
1921
1922 if (lp->state == OPEN) {
1923 if (dev->flags & IFF_PROMISC) {
1924 u32 omr;
1925 omr = inl(DE4X5_OMR);
1926 omr |= OMR_PR;
1927 outl(omr, DE4X5_OMR);
1928 } else {
1929 SetMulticastFilter(dev);
1930 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1931 SETUP_FRAME_LEN, (struct sk_buff *)1);
1932
1933 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1934 outl(POLL_DEMAND, DE4X5_TPD);
1935 dev->trans_start = jiffies;
1936 }
1937 }
1938}
1939
1940
1941
1942
1943
1944
1945static void
1946SetMulticastFilter(struct net_device *dev)
1947{
1948 struct de4x5_private *lp = netdev_priv(dev);
1949 struct netdev_hw_addr *ha;
1950 u_long iobase = dev->base_addr;
1951 int i, bit, byte;
1952 u16 hashcode;
1953 u32 omr, crc;
1954 char *pa;
1955 unsigned char *addrs;
1956
1957 omr = inl(DE4X5_OMR);
1958 omr &= ~(OMR_PR | OMR_PM);
1959 pa = build_setup_frame(dev, ALL);
1960
1961 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1962 omr |= OMR_PM;
1963 } else if (lp->setup_f == HASH_PERF) {
1964 netdev_for_each_mc_addr(ha, dev) {
1965 crc = ether_crc_le(ETH_ALEN, ha->addr);
1966 hashcode = crc & HASH_BITS;
1967
1968 byte = hashcode >> 3;
1969 bit = 1 << (hashcode & 0x07);
1970
1971 byte <<= 1;
1972 if (byte & 0x02) {
1973 byte -= 1;
1974 }
1975 lp->setup_frame[byte] |= bit;
1976 }
1977 } else {
1978 netdev_for_each_mc_addr(ha, dev) {
1979 addrs = ha->addr;
1980 for (i=0; i<ETH_ALEN; i++) {
1981 *(pa + (i&1)) = *addrs++;
1982 if (i & 0x01) pa += 4;
1983 }
1984 }
1985 }
1986 outl(omr, DE4X5_OMR);
1987}
1988
1989#ifdef CONFIG_EISA
1990
1991static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1992
1993static int __init de4x5_eisa_probe (struct device *gendev)
1994{
1995 struct eisa_device *edev;
1996 u_long iobase;
1997 u_char irq, regval;
1998 u_short vendor;
1999 u32 cfid;
2000 int status, device;
2001 struct net_device *dev;
2002 struct de4x5_private *lp;
2003
2004 edev = to_eisa_device (gendev);
2005 iobase = edev->base_addr;
2006
2007 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2008 return -EBUSY;
2009
2010 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2011 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2012 status = -EBUSY;
2013 goto release_reg_1;
2014 }
2015
2016 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2017 status = -ENOMEM;
2018 goto release_reg_2;
2019 }
2020 lp = netdev_priv(dev);
2021
2022 cfid = (u32) inl(PCI_CFID);
2023 lp->cfrv = (u_short) inl(PCI_CFRV);
2024 device = (cfid >> 8) & 0x00ffff00;
2025 vendor = (u_short) cfid;
2026
2027
2028 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2029#ifdef CONFIG_ALPHA
2030
2031
2032
2033
2034
2035
2036 outb (ER1_IAM | 1, EISA_REG1);
2037 mdelay (1);
2038
2039
2040 outb (ER1_IAM, EISA_REG1);
2041 mdelay (1);
2042
2043
2044 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2045
2046
2047 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2048#endif
2049 irq = de4x5_irq[(regval >> 1) & 0x03];
2050
2051 if (is_DC2114x) {
2052 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2053 }
2054 lp->chipset = device;
2055 lp->bus = EISA;
2056
2057
2058 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2059 outl(0x00006000, PCI_CFLT);
2060 outl(iobase, PCI_CBIO);
2061
2062 DevicePresent(dev, EISA_APROM);
2063
2064 dev->irq = irq;
2065
2066 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2067 return 0;
2068 }
2069
2070 free_netdev (dev);
2071 release_reg_2:
2072 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2073 release_reg_1:
2074 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2075
2076 return status;
2077}
2078
2079static int de4x5_eisa_remove(struct device *device)
2080{
2081 struct net_device *dev;
2082 u_long iobase;
2083
2084 dev = dev_get_drvdata(device);
2085 iobase = dev->base_addr;
2086
2087 unregister_netdev (dev);
2088 free_netdev (dev);
2089 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2090 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2091
2092 return 0;
2093}
2094
2095static struct eisa_device_id de4x5_eisa_ids[] = {
2096 { "DEC4250", 0 },
2097 { "" }
2098};
2099MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2100
2101static struct eisa_driver de4x5_eisa_driver = {
2102 .id_table = de4x5_eisa_ids,
2103 .driver = {
2104 .name = "de4x5",
2105 .probe = de4x5_eisa_probe,
2106 .remove = de4x5_eisa_remove,
2107 }
2108};
2109MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2110#endif
2111
2112#ifdef CONFIG_PCI
2113
2114
2115
2116
2117
2118
2119
2120static void
2121srom_search(struct net_device *dev, struct pci_dev *pdev)
2122{
2123 u_char pb;
2124 u_short vendor, status;
2125 u_int irq = 0, device;
2126 u_long iobase = 0;
2127 int i, j;
2128 struct de4x5_private *lp = netdev_priv(dev);
2129 struct pci_dev *this_dev;
2130
2131 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2132 vendor = this_dev->vendor;
2133 device = this_dev->device << 8;
2134 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2135
2136
2137 pb = this_dev->bus->number;
2138
2139
2140 lp->device = PCI_SLOT(this_dev->devfn);
2141 lp->bus_num = pb;
2142
2143
2144 if (is_DC2114x) {
2145 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2146 ? DC21142 : DC21143);
2147 }
2148 lp->chipset = device;
2149
2150
2151 iobase = pci_resource_start(this_dev, 0);
2152
2153
2154 irq = this_dev->irq;
2155 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2156
2157
2158 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2159 if (!(status & PCI_COMMAND_IO)) continue;
2160
2161
2162 DevicePresent(dev, DE4X5_APROM);
2163 for (j=0, i=0; i<ETH_ALEN; i++) {
2164 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2165 }
2166 if (j != 0 && j != 6 * 0xff) {
2167 last.chipset = device;
2168 last.bus = pb;
2169 last.irq = irq;
2170 for (i=0; i<ETH_ALEN; i++) {
2171 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2172 }
2173 return;
2174 }
2175 }
2176}
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194static int de4x5_pci_probe(struct pci_dev *pdev,
2195 const struct pci_device_id *ent)
2196{
2197 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2198 u_short vendor, status;
2199 u_int irq = 0, device;
2200 u_long iobase = 0;
2201 int error;
2202 struct net_device *dev;
2203 struct de4x5_private *lp;
2204
2205 dev_num = PCI_SLOT(pdev->devfn);
2206 pb = pdev->bus->number;
2207
2208 if (io) {
2209 pbus = (u_short)(io >> 8);
2210 dnum = (u_short)(io & 0xff);
2211 if ((pbus != pb) || (dnum != dev_num))
2212 return -ENODEV;
2213 }
2214
2215 vendor = pdev->vendor;
2216 device = pdev->device << 8;
2217 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2218 return -ENODEV;
2219
2220
2221 if ((error = pci_enable_device (pdev)))
2222 return error;
2223
2224 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2225 error = -ENOMEM;
2226 goto disable_dev;
2227 }
2228
2229 lp = netdev_priv(dev);
2230 lp->bus = PCI;
2231 lp->bus_num = 0;
2232
2233
2234 if (lp->bus_num != pb) {
2235 lp->bus_num = pb;
2236 srom_search(dev, pdev);
2237 }
2238
2239
2240 lp->cfrv = pdev->revision;
2241
2242
2243 lp->device = dev_num;
2244 lp->bus_num = pb;
2245
2246
2247 if (is_DC2114x) {
2248 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2249 }
2250 lp->chipset = device;
2251
2252
2253 iobase = pci_resource_start(pdev, 0);
2254
2255
2256 irq = pdev->irq;
2257 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2258 error = -ENODEV;
2259 goto free_dev;
2260 }
2261
2262
2263 pci_read_config_word(pdev, PCI_COMMAND, &status);
2264#ifdef __powerpc__
2265 if (!(status & PCI_COMMAND_IO)) {
2266 status |= PCI_COMMAND_IO;
2267 pci_write_config_word(pdev, PCI_COMMAND, status);
2268 pci_read_config_word(pdev, PCI_COMMAND, &status);
2269 }
2270#endif
2271 if (!(status & PCI_COMMAND_IO)) {
2272 error = -ENODEV;
2273 goto free_dev;
2274 }
2275
2276 if (!(status & PCI_COMMAND_MASTER)) {
2277 status |= PCI_COMMAND_MASTER;
2278 pci_write_config_word(pdev, PCI_COMMAND, status);
2279 pci_read_config_word(pdev, PCI_COMMAND, &status);
2280 }
2281 if (!(status & PCI_COMMAND_MASTER)) {
2282 error = -ENODEV;
2283 goto free_dev;
2284 }
2285
2286
2287 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2288 if (timer < 0x60) {
2289 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2290 }
2291
2292 DevicePresent(dev, DE4X5_APROM);
2293
2294 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2295 error = -EBUSY;
2296 goto free_dev;
2297 }
2298
2299 dev->irq = irq;
2300
2301 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2302 goto release;
2303 }
2304
2305 return 0;
2306
2307 release:
2308 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2309 free_dev:
2310 free_netdev (dev);
2311 disable_dev:
2312 pci_disable_device (pdev);
2313 return error;
2314}
2315
2316static void de4x5_pci_remove(struct pci_dev *pdev)
2317{
2318 struct net_device *dev;
2319 u_long iobase;
2320
2321 dev = pci_get_drvdata(pdev);
2322 iobase = dev->base_addr;
2323
2324 unregister_netdev (dev);
2325 free_netdev (dev);
2326 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2327 pci_disable_device (pdev);
2328}
2329
2330static const struct pci_device_id de4x5_pci_tbl[] = {
2331 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2332 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2333 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2334 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2335 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2336 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2337 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2338 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2339 { },
2340};
2341
2342static struct pci_driver de4x5_pci_driver = {
2343 .name = "de4x5",
2344 .id_table = de4x5_pci_tbl,
2345 .probe = de4x5_pci_probe,
2346 .remove = de4x5_pci_remove,
2347};
2348
2349#endif
2350
2351
2352
2353
2354
2355
2356
2357
2358static int
2359autoconf_media(struct net_device *dev)
2360{
2361 struct de4x5_private *lp = netdev_priv(dev);
2362 u_long iobase = dev->base_addr;
2363
2364 disable_ast(dev);
2365
2366 lp->c_media = AUTO;
2367 inl(DE4X5_MFC);
2368 lp->media = INIT;
2369 lp->tcount = 0;
2370
2371 de4x5_ast(dev);
2372
2373 return lp->media;
2374}
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388static int
2389dc21040_autoconf(struct net_device *dev)
2390{
2391 struct de4x5_private *lp = netdev_priv(dev);
2392 u_long iobase = dev->base_addr;
2393 int next_tick = DE4X5_AUTOSENSE_MS;
2394 s32 imr;
2395
2396 switch (lp->media) {
2397 case INIT:
2398 DISABLE_IRQs;
2399 lp->tx_enable = false;
2400 lp->timeout = -1;
2401 de4x5_save_skbs(dev);
2402 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2403 lp->media = TP;
2404 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2405 lp->media = BNC_AUI;
2406 } else if (lp->autosense == EXT_SIA) {
2407 lp->media = EXT_SIA;
2408 } else {
2409 lp->media = NC;
2410 }
2411 lp->local_state = 0;
2412 next_tick = dc21040_autoconf(dev);
2413 break;
2414
2415 case TP:
2416 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2417 TP_SUSPECT, test_tp);
2418 break;
2419
2420 case TP_SUSPECT:
2421 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2422 break;
2423
2424 case BNC:
2425 case AUI:
2426 case BNC_AUI:
2427 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2428 BNC_AUI_SUSPECT, ping_media);
2429 break;
2430
2431 case BNC_AUI_SUSPECT:
2432 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2433 break;
2434
2435 case EXT_SIA:
2436 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2437 NC, EXT_SIA_SUSPECT, ping_media);
2438 break;
2439
2440 case EXT_SIA_SUSPECT:
2441 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2442 break;
2443
2444 case NC:
2445
2446 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2447 if (lp->media != lp->c_media) {
2448 de4x5_dbg_media(dev);
2449 lp->c_media = lp->media;
2450 }
2451 lp->media = INIT;
2452 lp->tx_enable = false;
2453 break;
2454 }
2455
2456 return next_tick;
2457}
2458
2459static int
2460dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2461 int next_state, int suspect_state,
2462 int (*fn)(struct net_device *, int))
2463{
2464 struct de4x5_private *lp = netdev_priv(dev);
2465 int next_tick = DE4X5_AUTOSENSE_MS;
2466 int linkBad;
2467
2468 switch (lp->local_state) {
2469 case 0:
2470 reset_init_sia(dev, csr13, csr14, csr15);
2471 lp->local_state++;
2472 next_tick = 500;
2473 break;
2474
2475 case 1:
2476 if (!lp->tx_enable) {
2477 linkBad = fn(dev, timeout);
2478 if (linkBad < 0) {
2479 next_tick = linkBad & ~TIMER_CB;
2480 } else {
2481 if (linkBad && (lp->autosense == AUTO)) {
2482 lp->local_state = 0;
2483 lp->media = next_state;
2484 } else {
2485 de4x5_init_connection(dev);
2486 }
2487 }
2488 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2489 lp->media = suspect_state;
2490 next_tick = 3000;
2491 }
2492 break;
2493 }
2494
2495 return next_tick;
2496}
2497
2498static int
2499de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2500 int (*fn)(struct net_device *, int),
2501 int (*asfn)(struct net_device *))
2502{
2503 struct de4x5_private *lp = netdev_priv(dev);
2504 int next_tick = DE4X5_AUTOSENSE_MS;
2505 int linkBad;
2506
2507 switch (lp->local_state) {
2508 case 1:
2509 if (lp->linkOK) {
2510 lp->media = prev_state;
2511 } else {
2512 lp->local_state++;
2513 next_tick = asfn(dev);
2514 }
2515 break;
2516
2517 case 2:
2518 linkBad = fn(dev, timeout);
2519 if (linkBad < 0) {
2520 next_tick = linkBad & ~TIMER_CB;
2521 } else if (!linkBad) {
2522 lp->local_state--;
2523 lp->media = prev_state;
2524 } else {
2525 lp->media = INIT;
2526 lp->tcount++;
2527 }
2528 }
2529
2530 return next_tick;
2531}
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542static int
2543dc21041_autoconf(struct net_device *dev)
2544{
2545 struct de4x5_private *lp = netdev_priv(dev);
2546 u_long iobase = dev->base_addr;
2547 s32 sts, irqs, irq_mask, imr, omr;
2548 int next_tick = DE4X5_AUTOSENSE_MS;
2549
2550 switch (lp->media) {
2551 case INIT:
2552 DISABLE_IRQs;
2553 lp->tx_enable = false;
2554 lp->timeout = -1;
2555 de4x5_save_skbs(dev);
2556 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2557 lp->media = TP;
2558 } else if (lp->autosense == TP) {
2559 lp->media = TP;
2560 } else if (lp->autosense == BNC) {
2561 lp->media = BNC;
2562 } else if (lp->autosense == AUI) {
2563 lp->media = AUI;
2564 } else {
2565 lp->media = NC;
2566 }
2567 lp->local_state = 0;
2568 next_tick = dc21041_autoconf(dev);
2569 break;
2570
2571 case TP_NW:
2572 if (lp->timeout < 0) {
2573 omr = inl(DE4X5_OMR);
2574 outl(omr | OMR_FDX, DE4X5_OMR);
2575 }
2576 irqs = STS_LNF | STS_LNP;
2577 irq_mask = IMR_LFM | IMR_LPM;
2578 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2579 if (sts < 0) {
2580 next_tick = sts & ~TIMER_CB;
2581 } else {
2582 if (sts & STS_LNP) {
2583 lp->media = ANS;
2584 } else {
2585 lp->media = AUI;
2586 }
2587 next_tick = dc21041_autoconf(dev);
2588 }
2589 break;
2590
2591 case ANS:
2592 if (!lp->tx_enable) {
2593 irqs = STS_LNP;
2594 irq_mask = IMR_LPM;
2595 sts = test_ans(dev, irqs, irq_mask, 3000);
2596 if (sts < 0) {
2597 next_tick = sts & ~TIMER_CB;
2598 } else {
2599 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2600 lp->media = TP;
2601 next_tick = dc21041_autoconf(dev);
2602 } else {
2603 lp->local_state = 1;
2604 de4x5_init_connection(dev);
2605 }
2606 }
2607 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2608 lp->media = ANS_SUSPECT;
2609 next_tick = 3000;
2610 }
2611 break;
2612
2613 case ANS_SUSPECT:
2614 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2615 break;
2616
2617 case TP:
2618 if (!lp->tx_enable) {
2619 if (lp->timeout < 0) {
2620 omr = inl(DE4X5_OMR);
2621 outl(omr & ~OMR_FDX, DE4X5_OMR);
2622 }
2623 irqs = STS_LNF | STS_LNP;
2624 irq_mask = IMR_LFM | IMR_LPM;
2625 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2626 if (sts < 0) {
2627 next_tick = sts & ~TIMER_CB;
2628 } else {
2629 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2630 if (inl(DE4X5_SISR) & SISR_NRA) {
2631 lp->media = AUI;
2632 } else {
2633 lp->media = BNC;
2634 }
2635 next_tick = dc21041_autoconf(dev);
2636 } else {
2637 lp->local_state = 1;
2638 de4x5_init_connection(dev);
2639 }
2640 }
2641 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2642 lp->media = TP_SUSPECT;
2643 next_tick = 3000;
2644 }
2645 break;
2646
2647 case TP_SUSPECT:
2648 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2649 break;
2650
2651 case AUI:
2652 if (!lp->tx_enable) {
2653 if (lp->timeout < 0) {
2654 omr = inl(DE4X5_OMR);
2655 outl(omr & ~OMR_FDX, DE4X5_OMR);
2656 }
2657 irqs = 0;
2658 irq_mask = 0;
2659 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2660 if (sts < 0) {
2661 next_tick = sts & ~TIMER_CB;
2662 } else {
2663 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2664 lp->media = BNC;
2665 next_tick = dc21041_autoconf(dev);
2666 } else {
2667 lp->local_state = 1;
2668 de4x5_init_connection(dev);
2669 }
2670 }
2671 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2672 lp->media = AUI_SUSPECT;
2673 next_tick = 3000;
2674 }
2675 break;
2676
2677 case AUI_SUSPECT:
2678 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2679 break;
2680
2681 case BNC:
2682 switch (lp->local_state) {
2683 case 0:
2684 if (lp->timeout < 0) {
2685 omr = inl(DE4X5_OMR);
2686 outl(omr & ~OMR_FDX, DE4X5_OMR);
2687 }
2688 irqs = 0;
2689 irq_mask = 0;
2690 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2691 if (sts < 0) {
2692 next_tick = sts & ~TIMER_CB;
2693 } else {
2694 lp->local_state++;
2695 next_tick = dc21041_autoconf(dev);
2696 }
2697 break;
2698
2699 case 1:
2700 if (!lp->tx_enable) {
2701 if ((sts = ping_media(dev, 3000)) < 0) {
2702 next_tick = sts & ~TIMER_CB;
2703 } else {
2704 if (sts) {
2705 lp->local_state = 0;
2706 lp->media = NC;
2707 } else {
2708 de4x5_init_connection(dev);
2709 }
2710 }
2711 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2712 lp->media = BNC_SUSPECT;
2713 next_tick = 3000;
2714 }
2715 break;
2716 }
2717 break;
2718
2719 case BNC_SUSPECT:
2720 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2721 break;
2722
2723 case NC:
2724 omr = inl(DE4X5_OMR);
2725 outl(omr | OMR_FDX, DE4X5_OMR);
2726 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2727 if (lp->media != lp->c_media) {
2728 de4x5_dbg_media(dev);
2729 lp->c_media = lp->media;
2730 }
2731 lp->media = INIT;
2732 lp->tx_enable = false;
2733 break;
2734 }
2735
2736 return next_tick;
2737}
2738
2739
2740
2741
2742
2743
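/*
 * Autoconfiguration state machine for the DC21140(A) (MII/SYM ports).
 * lp->media tracks the current state (INIT, ANS, SPD_DET, _100Mb, _10Mb,
 * NC, ...); each timer-driven call advances the state and returns the
 * number of ms until the next poll.
 */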
2744static int
2745dc21140m_autoconf(struct net_device *dev)
2746{
2747 struct de4x5_private *lp = netdev_priv(dev);
2748 int ana, anlpa, cap, cr, slnk, sr;
2749 int next_tick = DE4X5_AUTOSENSE_MS;
2750 u_long imr, omr, iobase = dev->base_addr;
2751
2752 switch(lp->media) {
2753 case INIT:
2754 if (lp->timeout < 0) {
2755 DISABLE_IRQs;
2756 lp->tx_enable = false;
2757 lp->linkOK = 0;
2758 de4x5_save_skbs(dev);
2759 }
2760 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2761 next_tick &= ~TIMER_CB;
2762 } else {
2763 if (lp->useSROM) {
2764 if (srom_map_media(dev) < 0) {
2765 lp->tcount++;
2766 return next_tick;
2767 }
2768 srom_exec(dev, lp->phy[lp->active].gep);
2769 if (lp->infoblock_media == ANS) {
2770 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2771 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2772 }
2773 } else {
2774 lp->tmp = MII_SR_ASSC;
2775 SET_10Mb;
2776 if (lp->autosense == _100Mb) {
2777 lp->media = _100Mb;
2778 } else if (lp->autosense == _10Mb) {
2779 lp->media = _10Mb;
2780 } else if ((lp->autosense == AUTO) &&
2781 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2782 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2783 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2784 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2785 lp->media = ANS;
2786 } else if (lp->autosense == AUTO) {
2787 lp->media = SPD_DET;
2788 } else if (is_spd_100(dev) && is_100_up(dev)) {
2789 lp->media = _100Mb;
2790 } else {
2791 lp->media = NC;
2792 }
2793 }
2794 lp->local_state = 0;
2795 next_tick = dc21140m_autoconf(dev);
2796 }
2797 break;
2798
2799 case ANS:
2800 switch (lp->local_state) {
2801 case 0:
2802 if (lp->timeout < 0) {
2803 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2804 }
2805 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2806 if (cr < 0) {
2807 next_tick = cr & ~TIMER_CB;
2808 } else {
2809 if (cr) {
2810 lp->local_state = 0;
2811 lp->media = SPD_DET;
2812 } else {
2813 lp->local_state++;
2814 }
2815 next_tick = dc21140m_autoconf(dev);
2816 }
2817 break;
2818
2819 case 1:
2820 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2821 next_tick = sr & ~TIMER_CB;
2822 } else {
2823 lp->media = SPD_DET;
2824 lp->local_state = 0;
2825 if (sr) {
2826 lp->tmp = MII_SR_ASSC;
2827 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2828 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2829 if (!(anlpa & MII_ANLPA_RF) &&
2830 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2831 if (cap & MII_ANA_100M) {
2832 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2833 lp->media = _100Mb;
2834 } else if (cap & MII_ANA_10M) {
2835 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;

2837 lp->media = _10Mb;
2838 }
2839 }
2840 }
2841 next_tick = dc21140m_autoconf(dev);
2842 }
2843 break;
2844 }
2845 break;
2846
2847 case SPD_DET:
2848 if (lp->timeout < 0) {
2849 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2850 (~gep_rd(dev) & GEP_LNP));
2851 SET_100Mb_PDET;
2852 }
2853 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2854 next_tick = slnk & ~TIMER_CB;
2855 } else {
2856 if (is_spd_100(dev) && is_100_up(dev)) {
2857 lp->media = _100Mb;
2858 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2859 lp->media = _10Mb;
2860 } else {
2861 lp->media = NC;
2862 }
2863 next_tick = dc21140m_autoconf(dev);
2864 }
2865 break;
2866
2867 case _100Mb:
2868 next_tick = 3000;
2869 if (!lp->tx_enable) {
2870 SET_100Mb;
2871 de4x5_init_connection(dev);
2872 } else {
2873 if (!lp->linkOK && (lp->autosense == AUTO)) {
2874 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2875 lp->media = INIT;
2876 lp->tcount++;
2877 next_tick = DE4X5_AUTOSENSE_MS;
2878 }
2879 }
2880 }
2881 break;
2882
2883 case BNC:
2884 case AUI:
2885 case _10Mb:
2886 next_tick = 3000;
2887 if (!lp->tx_enable) {
2888 SET_10Mb;
2889 de4x5_init_connection(dev);
2890 } else {
2891 if (!lp->linkOK && (lp->autosense == AUTO)) {
2892 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2893 lp->media = INIT;
2894 lp->tcount++;
2895 next_tick = DE4X5_AUTOSENSE_MS;
2896 }
2897 }
2898 }
2899 break;
2900
2901 case NC:
2902 if (lp->media != lp->c_media) {
2903 de4x5_dbg_media(dev);
2904 lp->c_media = lp->media;
2905 }
2906 lp->media = INIT;
2907 lp->tx_enable = false;
2908 break;
2909 }
2910
2911 return next_tick;
2912}
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
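/*
 * Autoconfiguration state machine for the DC21142/21143 (SIA plus optional
 * MII). Media selection is driven by the SROM info leaf (lp->infoblock_media)
 * and any user override in lp->params.autosense; the states cover AUI, BNC,
 * ANS, SPD_DET and the 10/100Mb link monitors.
 */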
2928static int
2929dc2114x_autoconf(struct net_device *dev)
2930{
2931 struct de4x5_private *lp = netdev_priv(dev);
2932 u_long iobase = dev->base_addr;
2933 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2934 int next_tick = DE4X5_AUTOSENSE_MS;
2935
2936 switch (lp->media) {
2937 case INIT:
2938 if (lp->timeout < 0) {
2939 DISABLE_IRQs;
2940 lp->tx_enable = false;
2941 lp->linkOK = 0;
2942 lp->timeout = -1;
2943 de4x5_save_skbs(dev);
2944 if (lp->params.autosense & ~AUTO) {
2945 srom_map_media(dev);
2946 if (lp->media != lp->params.autosense) {
2947 lp->tcount++;
2948 lp->media = INIT;
2949 return next_tick;
2950 }
2951 lp->media = INIT;
2952 }
2953 }
2954 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2955 next_tick &= ~TIMER_CB;
2956 } else {
2957 if (lp->autosense == _100Mb) {
2958 lp->media = _100Mb;
2959 } else if (lp->autosense == _10Mb) {
2960 lp->media = _10Mb;
2961 } else if (lp->autosense == TP) {
2962 lp->media = TP;
2963 } else if (lp->autosense == BNC) {
2964 lp->media = BNC;
2965 } else if (lp->autosense == AUI) {
2966 lp->media = AUI;
2967 } else {
2968 lp->media = SPD_DET;
2969 if ((lp->infoblock_media == ANS) &&
2970 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2971 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2972 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2973 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2974 lp->media = ANS;
2975 }
2976 }
2977 lp->local_state = 0;
2978 next_tick = dc2114x_autoconf(dev);
2979 }
2980 break;
2981
2982 case ANS:
2983 switch (lp->local_state) {
2984 case 0:
2985 if (lp->timeout < 0) {
2986 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2987 }
2988 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2989 if (cr < 0) {
2990 next_tick = cr & ~TIMER_CB;
2991 } else {
2992 if (cr) {
2993 lp->local_state = 0;
2994 lp->media = SPD_DET;
2995 } else {
2996 lp->local_state++;
2997 }
2998 next_tick = dc2114x_autoconf(dev);
2999 }
3000 break;
3001
3002 case 1:
3003 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3004 if (sr < 0) {
3005 next_tick = sr & ~TIMER_CB;
3006 } else {
3007 lp->media = SPD_DET;
3008 lp->local_state = 0;
3009 if (sr) {
3010 lp->tmp = MII_SR_ASSC;
3011 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3012 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3013 if (!(anlpa & MII_ANLPA_RF) &&
3014 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3015 if (cap & MII_ANA_100M) {
3016 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3017 lp->media = _100Mb;
3018 } else if (cap & MII_ANA_10M) {
3019 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3020 lp->media = _10Mb;
3021 }
3022 }
3023 }
3024 next_tick = dc2114x_autoconf(dev);
3025 }
3026 break;
3027 }
3028 break;
3029
3030 case AUI:
3031 if (!lp->tx_enable) {
3032 if (lp->timeout < 0) {
3033 omr = inl(DE4X5_OMR);
3034 outl(omr & ~OMR_FDX, DE4X5_OMR);
3035 }
3036 irqs = 0;
3037 irq_mask = 0;
3038 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3039 if (sts < 0) {
3040 next_tick = sts & ~TIMER_CB;
3041 } else {
3042 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3043 lp->media = BNC;
3044 next_tick = dc2114x_autoconf(dev);
3045 } else {
3046 lp->local_state = 1;
3047 de4x5_init_connection(dev);
3048 }
3049 }
3050 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3051 lp->media = AUI_SUSPECT;
3052 next_tick = 3000;
3053 }
3054 break;
3055
3056 case AUI_SUSPECT:
3057 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3058 break;
3059
3060 case BNC:
3061 switch (lp->local_state) {
3062 case 0:
3063 if (lp->timeout < 0) {
3064 omr = inl(DE4X5_OMR);
3065 outl(omr & ~OMR_FDX, DE4X5_OMR);
3066 }
3067 irqs = 0;
3068 irq_mask = 0;
3069 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3070 if (sts < 0) {
3071 next_tick = sts & ~TIMER_CB;
3072 } else {
3073 lp->local_state++;
3074 next_tick = dc2114x_autoconf(dev);
3075 }
3076 break;
3077
3078 case 1:
3079 if (!lp->tx_enable) {
3080 if ((sts = ping_media(dev, 3000)) < 0) {
3081 next_tick = sts & ~TIMER_CB;
3082 } else {
3083 if (sts) {
3084 lp->local_state = 0;
3085 lp->tcount++;
3086 lp->media = INIT;
3087 } else {
3088 de4x5_init_connection(dev);
3089 }
3090 }
3091 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3092 lp->media = BNC_SUSPECT;
3093 next_tick = 3000;
3094 }
3095 break;
3096 }
3097 break;
3098
3099 case BNC_SUSPECT:
3100 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3101 break;
3102
3103 case SPD_DET:
3104 if (srom_map_media(dev) < 0) {
3105 lp->tcount++;
3106 lp->media = INIT;
3107 return next_tick;
3108 }
3109 if (lp->media == _100Mb) {
3110 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3111 lp->media = SPD_DET;
3112 return slnk & ~TIMER_CB;
3113 }
3114 } else {
3115 if (wait_for_link(dev) < 0) {
3116 lp->media = SPD_DET;
3117 return PDET_LINK_WAIT;
3118 }
3119 }
3120 if (lp->media == ANS) {
3121 if (is_spd_100(dev)) {
3122 lp->media = _100Mb;
3123 } else {
3124 lp->media = _10Mb;
3125 }
3126 next_tick = dc2114x_autoconf(dev);
3127 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3128 (((lp->media == _10Mb) || (lp->media == TP) ||
3129 (lp->media == BNC) || (lp->media == AUI)) &&
3130 is_10_up(dev))) {
3131 next_tick = dc2114x_autoconf(dev);
3132 } else {
3133 lp->tcount++;
3134 lp->media = INIT;
3135 }
3136 break;
3137
3138 case _10Mb:
3139 next_tick = 3000;
3140 if (!lp->tx_enable) {
3141 SET_10Mb;
3142 de4x5_init_connection(dev);
3143 } else {
3144 if (!lp->linkOK && (lp->autosense == AUTO)) {
3145 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3146 lp->media = INIT;
3147 lp->tcount++;
3148 next_tick = DE4X5_AUTOSENSE_MS;
3149 }
3150 }
3151 }
3152 break;
3153
3154 case _100Mb:
3155 next_tick = 3000;
3156 if (!lp->tx_enable) {
3157 SET_100Mb;
3158 de4x5_init_connection(dev);
3159 } else {
3160 if (!lp->linkOK && (lp->autosense == AUTO)) {
3161 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3162 lp->media = INIT;
3163 lp->tcount++;
3164 next_tick = DE4X5_AUTOSENSE_MS;
3165 }
3166 }
3167 }
3168 break;
3169
3170 default:
3171 lp->tcount++;
        printk("%s: unknown media code 0x%02x\n", dev->name, lp->media);
3173 lp->media = INIT;
3174 break;
3175 }
3176
3177 return next_tick;
3178}
3179
3180static int
3181srom_autoconf(struct net_device *dev)
3182{
3183 struct de4x5_private *lp = netdev_priv(dev);
3184
3185 return lp->infoleaf_fn(dev);
3186}
3187
3188
3189
3190
3191
3192
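/*
 * Map the SROM media code in lp->infoblock_media onto the driver's own media
 * states (TP/BNC/AUI/_10Mb/_100Mb/ANS) and set lp->fdx accordingly. Returns
 * -1 if the requested duplex cannot be honoured or the code is unknown.
 */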
3193static int
3194srom_map_media(struct net_device *dev)
3195{
3196 struct de4x5_private *lp = netdev_priv(dev);
3197
3198 lp->fdx = false;
3199 if (lp->infoblock_media == lp->media)
3200 return 0;
3201
3202 switch(lp->infoblock_media) {
3203 case SROM_10BASETF:
3204 if (!lp->params.fdx) return -1;
3205 lp->fdx = true;
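	/* fall through - full duplex noted, now pick the 10Mb medium */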
3206 case SROM_10BASET:
3207 if (lp->params.fdx && !lp->fdx) return -1;
3208 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3209 lp->media = _10Mb;
3210 } else {
3211 lp->media = TP;
3212 }
3213 break;
3214
3215 case SROM_10BASE2:
3216 lp->media = BNC;
3217 break;
3218
3219 case SROM_10BASE5:
3220 lp->media = AUI;
3221 break;
3222
3223 case SROM_100BASETF:
3224 if (!lp->params.fdx) return -1;
3225 lp->fdx = true;
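	/* fall through */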
3226 case SROM_100BASET:
3227 if (lp->params.fdx && !lp->fdx) return -1;
3228 lp->media = _100Mb;
3229 break;
3230
3231 case SROM_100BASET4:
3232 lp->media = _100Mb;
3233 break;
3234
3235 case SROM_100BASEFF:
3236 if (!lp->params.fdx) return -1;
3237 lp->fdx = true;
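	/* fall through */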
3238 case SROM_100BASEF:
3239 if (lp->params.fdx && !lp->fdx) return -1;
3240 lp->media = _100Mb;
3241 break;
3242
3243 case ANS:
3244 lp->media = ANS;
3245 lp->fdx = lp->params.fdx;
3246 break;
3247
3248 default:
3249 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3250 lp->infoblock_media);
3251 return -1;
3252 }
3253
3254 return 0;
3255}
3256
3257static void
3258de4x5_init_connection(struct net_device *dev)
3259{
3260 struct de4x5_private *lp = netdev_priv(dev);
3261 u_long iobase = dev->base_addr;
3262 u_long flags = 0;
3263
3264 if (lp->media != lp->c_media) {
3265 de4x5_dbg_media(dev);
3266 lp->c_media = lp->media;
3267 }
3268
3269 spin_lock_irqsave(&lp->lock, flags);
3270 de4x5_rst_desc_ring(dev);
3271 de4x5_setup_intr(dev);
3272 lp->tx_enable = true;
3273 spin_unlock_irqrestore(&lp->lock, flags);
3274 outl(POLL_DEMAND, DE4X5_TPD);
3275
3276 netif_wake_queue(dev);
3277}
3278
3279
3280
3281
3282
3283
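/*
 * Reset the attached PHY: SROM driven boards run their reset sequence twice,
 * other boards use the GEP hard reset, and MII PHYs additionally get an MII
 * soft reset. Returns a wait hint from test_mii_reg() while an MII reset is
 * still completing.
 */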
3284static int
3285de4x5_reset_phy(struct net_device *dev)
3286{
3287 struct de4x5_private *lp = netdev_priv(dev);
3288 u_long iobase = dev->base_addr;
3289 int next_tick = 0;
3290
3291 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3292 if (lp->timeout < 0) {
3293 if (lp->useSROM) {
3294 if (lp->phy[lp->active].rst) {
3295 srom_exec(dev, lp->phy[lp->active].rst);
3296 srom_exec(dev, lp->phy[lp->active].rst);
3297 } else if (lp->rst) {
3298 srom_exec(dev, lp->rst);
3299 srom_exec(dev, lp->rst);
3300 }
3301 } else {
3302 PHY_HARD_RESET;
3303 }
3304 if (lp->useMII) {
3305 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3306 }
3307 }
3308 if (lp->useMII) {
3309 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3310 }
3311 } else if (lp->chipset == DC21140) {
3312 PHY_HARD_RESET;
3313 }
3314
3315 return next_tick;
3316}
3317
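/*
 * Generic media test: on the first call (re)programme the SIA with
 * csr13/14/15 and clear any pending status, then poll the status register
 * for the interrupt bits in 'irqs' for up to 'msec' ms. While the timeout
 * is still running a TIMER_CB-tagged retry interval is returned instead.
 */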
3318static int
3319test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3320{
3321 struct de4x5_private *lp = netdev_priv(dev);
3322 u_long iobase = dev->base_addr;
3323 s32 sts, csr12;
3324
3325 if (lp->timeout < 0) {
3326 lp->timeout = msec/100;
3327 if (!lp->useSROM) {
3328 reset_init_sia(dev, csr13, csr14, csr15);
3329 }
3330
3331
3332 outl(irq_mask, DE4X5_IMR);
3333
3334
3335 sts = inl(DE4X5_STS);
3336 outl(sts, DE4X5_STS);
3337
3338
3339 if ((lp->chipset == DC21041) || lp->useSROM) {
3340 csr12 = inl(DE4X5_SISR);
3341 outl(csr12, DE4X5_SISR);
3342 }
3343 }
3344
3345 sts = inl(DE4X5_STS) & ~TIMER_CB;
3346
3347 if (!(sts & irqs) && --lp->timeout) {
3348 sts = 100 | TIMER_CB;
3349 } else {
3350 lp->timeout = -1;
3351 }
3352
3353 return sts;
3354}
3355
3356static int
3357test_tp(struct net_device *dev, s32 msec)
3358{
3359 struct de4x5_private *lp = netdev_priv(dev);
3360 u_long iobase = dev->base_addr;
3361 int sisr;
3362
3363 if (lp->timeout < 0) {
3364 lp->timeout = msec/100;
3365 }
3366
3367 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3368
3369 if (sisr && --lp->timeout) {
3370 sisr = 100 | TIMER_CB;
3371 } else {
3372 lp->timeout = -1;
3373 }
3374
3375 return sisr;
3376}
3377
3378
3379
3380
3381
3382
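/*
 * Sample the 100Mb link state. The first SAMPLE_DELAY ms are skipped to let
 * the link settle, then the link/speed indications (from the MII/SROM or the
 * GEP pins, depending on the board) are polled every SAMPLE_INTERVAL ms for
 * up to 'msec' ms.
 */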
3383#define SAMPLE_INTERVAL 500
3384#define SAMPLE_DELAY 2000
3385static int
3386test_for_100Mb(struct net_device *dev, int msec)
3387{
3388 struct de4x5_private *lp = netdev_priv(dev);
3389 int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
3390
3391 if (lp->timeout < 0) {
3392 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3393 if (msec > SAMPLE_DELAY) {
3394 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3395 gep = SAMPLE_DELAY | TIMER_CB;
3396 return gep;
3397 } else {
3398 lp->timeout = msec/SAMPLE_INTERVAL;
3399 }
3400 }
3401
3402 if (lp->phy[lp->active].id || lp->useSROM) {
3403 gep = is_100_up(dev) | is_spd_100(dev);
3404 } else {
3405 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3406 }
3407 if (!(gep & ret) && --lp->timeout) {
3408 gep = SAMPLE_INTERVAL | TIMER_CB;
3409 } else {
3410 lp->timeout = -1;
3411 }
3412
3413 return gep;
3414}
3415
3416static int
3417wait_for_link(struct net_device *dev)
3418{
3419 struct de4x5_private *lp = netdev_priv(dev);
3420
3421 if (lp->timeout < 0) {
3422 lp->timeout = 1;
3423 }
3424
3425 if (lp->timeout--) {
3426 return TIMER_CB;
3427 } else {
3428 lp->timeout = -1;
3429 }
3430
3431 return 0;
3432}
3433
3434
3435
3436
3437
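/*
 * Poll an MII register until (reg & mask) reaches the polarity given by
 * 'pol' (true = wait for the bits to set, false = wait for them to clear),
 * timing out after 'msec' ms.
 */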
3438static int
3439test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3440{
3441 struct de4x5_private *lp = netdev_priv(dev);
3442 int test;
3443 u_long iobase = dev->base_addr;
3444
3445 if (lp->timeout < 0) {
3446 lp->timeout = msec/100;
3447 }
3448
3449 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3450 test = (reg ^ (pol ? ~0 : 0)) & mask;
3451
3452 if (test && --lp->timeout) {
3453 reg = 100 | TIMER_CB;
3454 } else {
3455 lp->timeout = -1;
3456 }
3457
3458 return reg;
3459}
3460
3461static int
3462is_spd_100(struct net_device *dev)
3463{
3464 struct de4x5_private *lp = netdev_priv(dev);
3465 u_long iobase = dev->base_addr;
3466 int spd;
3467
3468 if (lp->useMII) {
3469 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3470 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3471 spd &= lp->phy[lp->active].spd.mask;
3472 } else if (!lp->useSROM) {
3473 spd = ((~gep_rd(dev)) & GEP_SLNK);
3474 } else {
3475 if ((lp->ibn == 2) || !lp->asBitValid)
3476 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3477
3478 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3479 (lp->linkOK & ~lp->asBitValid);
3480 }
3481
3482 return spd;
3483}
3484
3485static int
3486is_100_up(struct net_device *dev)
3487{
3488 struct de4x5_private *lp = netdev_priv(dev);
3489 u_long iobase = dev->base_addr;
3490
3491 if (lp->useMII) {
3492
3493 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3494 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3495 } else if (!lp->useSROM) {
3496 return (~gep_rd(dev)) & GEP_SLNK;
3497 } else {
3498 if ((lp->ibn == 2) || !lp->asBitValid)
3499 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3500
3501 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3502 (lp->linkOK & ~lp->asBitValid);
3503 }
3504}
3505
3506static int
3507is_10_up(struct net_device *dev)
3508{
3509 struct de4x5_private *lp = netdev_priv(dev);
3510 u_long iobase = dev->base_addr;
3511
3512 if (lp->useMII) {
3513
3514 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3515 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3516 } else if (!lp->useSROM) {
3517 return (~gep_rd(dev)) & GEP_LNP;
3518 } else {
3519 if ((lp->ibn == 2) || !lp->asBitValid)
3520 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3521 (~inl(DE4X5_SISR)&SISR_LS10):
3522 0;
3523
3524 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3525 (lp->linkOK & ~lp->asBitValid);
3526 }
3527}
3528
3529static int
3530is_anc_capable(struct net_device *dev)
3531{
3532 struct de4x5_private *lp = netdev_priv(dev);
3533 u_long iobase = dev->base_addr;
3534
3535 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3536 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3537 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3538 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3539 } else {
3540 return 0;
3541 }
3542}
3543
3544
3545
3546
3547
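/*
 * Check a BNC/AUI connection by transmitting a single test frame and
 * watching for "no carrier" and for the descriptor coming back without
 * error. Returns 0 on success, 1 on failure, or a TIMER_CB-tagged retry
 * while the transmit is still outstanding.
 */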
3548static int
3549ping_media(struct net_device *dev, int msec)
3550{
3551 struct de4x5_private *lp = netdev_priv(dev);
3552 u_long iobase = dev->base_addr;
3553 int sisr;
3554
3555 if (lp->timeout < 0) {
3556 lp->timeout = msec/100;
3557
3558 lp->tmp = lp->tx_new;
3559 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3560 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3561 outl(POLL_DEMAND, DE4X5_TPD);
3562 }
3563
3564 sisr = inl(DE4X5_SISR);
3565
3566 if ((!(sisr & SISR_NCR)) &&
3567 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3568 (--lp->timeout)) {
3569 sisr = 100 | TIMER_CB;
3570 } else {
3571 if ((!(sisr & SISR_NCR)) &&
3572 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3573 lp->timeout) {
3574 sisr = 0;
3575 } else {
3576 sisr = 1;
3577 }
3578 lp->timeout = -1;
3579 }
3580
3581 return sisr;
3582}
3583
3584
3585
3586
3587
3588
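/*
 * Allocate a replacement receive buffer. On platforms that can DMA straight
 * into an skb, a new cache-aligned skb replaces the one in the ring and the
 * old (filled) skb is returned; otherwise the received data is copied out of
 * the fixed DMA buffer area into a freshly allocated skb.
 */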
3589static struct sk_buff *
3590de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3591{
3592 struct de4x5_private *lp = netdev_priv(dev);
3593 struct sk_buff *p;
3594
3595#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3596 struct sk_buff *ret;
3597 u_long i=0, tmp;
3598
3599 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3600 if (!p) return NULL;
3601
3602 tmp = virt_to_bus(p->data);
3603 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3604 skb_reserve(p, i);
3605 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3606
3607 ret = lp->rx_skb[index];
3608 lp->rx_skb[index] = p;
3609
3610 if ((u_long) ret > 1) {
3611 skb_put(ret, len);
3612 }
3613
3614 return ret;
3615
3616#else
3617 if (lp->state != OPEN) return (struct sk_buff *)1;
3618
3619 p = netdev_alloc_skb(dev, len + 2);
3620 if (!p) return NULL;
3621
3622 skb_reserve(p, 2);
3623 if (index < lp->rx_old) {
3624 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3625 memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
3626 memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
3627 } else {
3628 memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
3629 }
3630
3631 return p;
3632#endif
3633}
3634
3635static void
3636de4x5_free_rx_buffs(struct net_device *dev)
3637{
3638 struct de4x5_private *lp = netdev_priv(dev);
3639 int i;
3640
3641 for (i=0; i<lp->rxRingSize; i++) {
3642 if ((u_long) lp->rx_skb[i] > 1) {
3643 dev_kfree_skb(lp->rx_skb[i]);
3644 }
3645 lp->rx_ring[i].status = 0;
3646 lp->rx_skb[i] = (struct sk_buff *)1;
3647 }
3648}
3649
3650static void
3651de4x5_free_tx_buffs(struct net_device *dev)
3652{
3653 struct de4x5_private *lp = netdev_priv(dev);
3654 int i;
3655
3656 for (i=0; i<lp->txRingSize; i++) {
3657 if (lp->tx_skb[i])
3658 de4x5_free_tx_buff(lp, i);
3659 lp->tx_ring[i].status = 0;
3660 }
3661
3662
3663 __skb_queue_purge(&lp->cache.queue);
3664}
3665
3666
3667
3668
3669
3670
3671
3672
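/*
 * Quiesce the chip before a media change: stop it, reap and free any pending
 * transmit buffers, save CSR0/6/7, soft reset, then restore the saved state
 * and restart. cache.save_cnt guards against doing this twice in a row.
 */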
3673static void
3674de4x5_save_skbs(struct net_device *dev)
3675{
3676 struct de4x5_private *lp = netdev_priv(dev);
3677 u_long iobase = dev->base_addr;
3678 s32 omr;
3679
3680 if (!lp->cache.save_cnt) {
3681 STOP_DE4X5;
3682 de4x5_tx(dev);
3683 de4x5_free_tx_buffs(dev);
3684 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3685 de4x5_sw_reset(dev);
3686 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3687 lp->cache.save_cnt++;
3688 START_DE4X5;
3689 }
3690}
3691
3692static void
3693de4x5_rst_desc_ring(struct net_device *dev)
3694{
3695 struct de4x5_private *lp = netdev_priv(dev);
3696 u_long iobase = dev->base_addr;
3697 int i;
3698 s32 omr;
3699
3700 if (lp->cache.save_cnt) {
3701 STOP_DE4X5;
3702 outl(lp->dma_rings, DE4X5_RRBA);
3703 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3704 DE4X5_TRBA);
3705
3706 lp->rx_new = lp->rx_old = 0;
3707 lp->tx_new = lp->tx_old = 0;
3708
3709 for (i = 0; i < lp->rxRingSize; i++) {
3710 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3711 }
3712
3713 for (i = 0; i < lp->txRingSize; i++) {
3714 lp->tx_ring[i].status = cpu_to_le32(0);
3715 }
3716
3717 barrier();
3718 lp->cache.save_cnt--;
3719 START_DE4X5;
3720 }
3721}
3722
3723static void
3724de4x5_cache_state(struct net_device *dev, int flag)
3725{
3726 struct de4x5_private *lp = netdev_priv(dev);
3727 u_long iobase = dev->base_addr;
3728
3729 switch(flag) {
3730 case DE4X5_SAVE_STATE:
3731 lp->cache.csr0 = inl(DE4X5_BMR);
3732 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3733 lp->cache.csr7 = inl(DE4X5_IMR);
3734 break;
3735
3736 case DE4X5_RESTORE_STATE:
3737 outl(lp->cache.csr0, DE4X5_BMR);
3738 outl(lp->cache.csr6, DE4X5_OMR);
3739 outl(lp->cache.csr7, DE4X5_IMR);
3740 if (lp->chipset == DC21140) {
3741 gep_wr(lp->cache.gepc, dev);
3742 gep_wr(lp->cache.gep, dev);
3743 } else {
3744 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3745 lp->cache.csr15);
3746 }
3747 break;
3748 }
3749}
3750
3751static void
3752de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3753{
3754 struct de4x5_private *lp = netdev_priv(dev);
3755
3756 __skb_queue_tail(&lp->cache.queue, skb);
3757}
3758
3759static void
3760de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3761{
3762 struct de4x5_private *lp = netdev_priv(dev);
3763
3764 __skb_queue_head(&lp->cache.queue, skb);
3765}
3766
3767static struct sk_buff *
3768de4x5_get_cache(struct net_device *dev)
3769{
3770 struct de4x5_private *lp = netdev_priv(dev);
3771
3772 return __skb_dequeue(&lp->cache.queue);
3773}
3774
3775
3776
3777
3778
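/*
 * Wait for SIA autonegotiation: poll until the SISR ANS field reaches the
 * "negotiation OK" state or one of the requested status bits (irqs) is
 * raised, timing out after 'msec' ms.
 */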
3779static int
3780test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3781{
3782 struct de4x5_private *lp = netdev_priv(dev);
3783 u_long iobase = dev->base_addr;
3784 s32 sts, ans;
3785
3786 if (lp->timeout < 0) {
3787 lp->timeout = msec/100;
3788 outl(irq_mask, DE4X5_IMR);
3789
3790
3791 sts = inl(DE4X5_STS);
3792 outl(sts, DE4X5_STS);
3793 }
3794
3795 ans = inl(DE4X5_SISR) & SISR_ANS;
3796 sts = inl(DE4X5_STS) & ~TIMER_CB;
3797
3798 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3799 sts = 100 | TIMER_CB;
3800 } else {
3801 lp->timeout = -1;
3802 }
3803
3804 return sts;
3805}
3806
3807static void
3808de4x5_setup_intr(struct net_device *dev)
3809{
3810 struct de4x5_private *lp = netdev_priv(dev);
3811 u_long iobase = dev->base_addr;
3812 s32 imr, sts;
3813
3814 if (inl(DE4X5_OMR) & OMR_SR) {
3815 imr = 0;
3816 UNMASK_IRQs;
3817 sts = inl(DE4X5_STS);
3818 outl(sts, DE4X5_STS);
3819 ENABLE_IRQs;
3820 }
3821}
3822
3823
3824
3825
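/*
 * Reset the SIA and reload csr13/14/15. SROM driven boards use their cached
 * CSR and GEP values instead, and info block type 3 boards run their SROM
 * reset and GEP sequences directly.
 */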
3826static void
3827reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3828{
3829 struct de4x5_private *lp = netdev_priv(dev);
3830 u_long iobase = dev->base_addr;
3831
3832 RESET_SIA;
3833 if (lp->useSROM) {
3834 if (lp->ibn == 3) {
3835 srom_exec(dev, lp->phy[lp->active].rst);
3836 srom_exec(dev, lp->phy[lp->active].gep);
3837 outl(1, DE4X5_SICR);
3838 return;
3839 } else {
3840 csr15 = lp->cache.csr15;
3841 csr14 = lp->cache.csr14;
3842 csr13 = lp->cache.csr13;
3843 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3844 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3845 }
3846 } else {
3847 outl(csr15, DE4X5_SIGR);
3848 }
3849 outl(csr14, DE4X5_STRR);
3850 outl(csr13, DE4X5_SICR);
3851
3852 mdelay(10);
3853}
3854
3855
3856
3857
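/*
 * Build a minimal test frame addressed to ourselves (destination and source
 * are both dev->dev_addr) with a length field of 1; presumably this is the
 * frame used by the media ping test.
 */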
3858static void
3859create_packet(struct net_device *dev, char *frame, int len)
3860{
3861 int i;
3862 char *buf = frame;
3863
3864 for (i=0; i<ETH_ALEN; i++) {
3865 *buf++ = dev->dev_addr[i];
3866 }
3867 for (i=0; i<ETH_ALEN; i++) {
3868 *buf++ = dev->dev_addr[i];
3869 }
3870
3871 *buf++ = 0;
3872 *buf++ = 1;
3873}
3874
3875
3876
3877
3878static int
3879EISA_signature(char *name, struct device *device)
3880{
3881 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3882 struct eisa_device *edev;
3883
3884 *name = '\0';
3885 edev = to_eisa_device (device);
3886 i = edev->id.driver_data;
3887
3888 if (i >= 0 && i < siglen) {
3889 strcpy (name, de4x5_signatures[i]);
3890 status = 1;
3891 }
3892
3893 return status;
3894}
3895
3896
3897
3898
3899static int
3900PCI_signature(char *name, struct de4x5_private *lp)
3901{
3902 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3903
3904 if (lp->chipset == DC21040) {
3905 strcpy(name, "DE434/5");
3906 return status;
3907 } else {
3908 int tmp = *((char *)&lp->srom + 19) * 3;
3909 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3910 }
3911 name[8] = '\0';
3912 for (i=0; i<siglen; i++) {
3913 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3914 }
3915 if (i == siglen) {
3916 if (dec_only) {
3917 *name = '\0';
3918 } else {
3919 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3920 ((lp->chipset == DC21041) ? "DC21041" :
3921 ((lp->chipset == DC21140) ? "DC21140" :
3922 ((lp->chipset == DC21142) ? "DC21142" :
3923 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3924 )))))));
3925 }
3926 if (lp->chipset != DC21041) {
3927 lp->useSROM = true;
3928 }
3929 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3930 lp->useSROM = true;
3931 }
3932
3933 return status;
3934}
3935
3936
3937
3938
3939
3940
3941
3942
3943
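/*
 * Fetch the station address. On the DC21040 the address ROM pointer is just
 * rewound here so the caller can read it out; later chips have their serial
 * SROM read into lp->srom, bailing out early if the station address words
 * are all zeros or all ones.
 */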
3944static void
3945DevicePresent(struct net_device *dev, u_long aprom_addr)
3946{
3947 int i, j=0;
3948 struct de4x5_private *lp = netdev_priv(dev);
3949
3950 if (lp->chipset == DC21040) {
3951 if (lp->bus == EISA) {
3952 enet_addr_rst(aprom_addr);
3953 } else {
3954 outl(0, aprom_addr);
3955 }
3956 } else {
3957 u_short tmp;
3958 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3959 for (i=0; i<(ETH_ALEN>>1); i++) {
3960 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3961 j += tmp;
3962 *p = cpu_to_le16(tmp);
3963 }
3964 if (j == 0 || j == 3 * 0xffff) {
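            /* Address words summed to all zeros or all ones: assume the
             * SROM is absent or blank and leave things as they are.
             */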
3965
3966 return;
3967 }
3968
3969 p = (__le16 *)&lp->srom;
3970 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3971 tmp = srom_rd(aprom_addr, i);
3972 *p++ = cpu_to_le16(tmp);
3973 }
3974 de4x5_dbg_srom(&lp->srom);
3975 }
3976}
3977
3978
3979
3980
3981
3982
3983static void
3984enet_addr_rst(u_long aprom_addr)
3985{
3986 union {
3987 struct {
3988 u32 a;
3989 u32 b;
3990 } llsig;
3991 char Sig[sizeof(u32) << 1];
3992 } dev;
3993 short sigLength=0;
3994 s8 data;
3995 int i, j;
3996
3997 dev.llsig.a = ETH_PROM_SIG;
3998 dev.llsig.b = ETH_PROM_SIG;
3999 sigLength = sizeof(u32) << 1;
4000
4001 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4002 data = inb(aprom_addr);
4003 if (dev.Sig[j] == data) {
4004 j++;
4005 } else {
4006 if (data == dev.Sig[0]) {
4007 j=1;
4008 } else {
4009 j=0;
4010 }
4011 }
4012 }
4013}
4014
4015
4016
4017
4018
4019
4020
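/*
 * Extract the station address from the address PROM / SROM, working around
 * known-bad SMC and Accton SROM layouts, verify the address checksum where
 * the chip provides one, and undo the bit-reversed byte ordering seen on
 * some PowerMac onboard variants.
 */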
4021static int
4022get_hw_addr(struct net_device *dev)
4023{
4024 u_long iobase = dev->base_addr;
4025 int broken, i, k, tmp, status = 0;
4026 u_short j,chksum;
4027 struct de4x5_private *lp = netdev_priv(dev);
4028
4029 broken = de4x5_bad_srom(lp);
4030
4031 for (i=0,k=0,j=0;j<3;j++) {
4032 k <<= 1;
4033 if (k > 0xffff) k-=0xffff;
4034
4035 if (lp->bus == PCI) {
4036 if (lp->chipset == DC21040) {
4037 while ((tmp = inl(DE4X5_APROM)) < 0);
4038 k += (u_char) tmp;
4039 dev->dev_addr[i++] = (u_char) tmp;
4040 while ((tmp = inl(DE4X5_APROM)) < 0);
4041 k += (u_short) (tmp << 8);
4042 dev->dev_addr[i++] = (u_char) tmp;
4043 } else if (!broken) {
4044 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4045 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4046 } else if ((broken == SMC) || (broken == ACCTON)) {
4047 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4048 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4049 }
4050 } else {
4051 k += (u_char) (tmp = inb(EISA_APROM));
4052 dev->dev_addr[i++] = (u_char) tmp;
4053 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4054 dev->dev_addr[i++] = (u_char) tmp;
4055 }
4056
4057 if (k > 0xffff) k-=0xffff;
4058 }
4059 if (k == 0xffff) k=0;
4060
4061 if (lp->bus == PCI) {
4062 if (lp->chipset == DC21040) {
4063 while ((tmp = inl(DE4X5_APROM)) < 0);
4064 chksum = (u_char) tmp;
4065 while ((tmp = inl(DE4X5_APROM)) < 0);
4066 chksum |= (u_short) (tmp << 8);
4067 if ((k != chksum) && (dec_only)) status = -1;
4068 }
4069 } else {
4070 chksum = (u_char) inb(EISA_APROM);
4071 chksum |= (u_short) (inb(EISA_APROM) << 8);
4072 if ((k != chksum) && (dec_only)) status = -1;
4073 }
4074
4075
4076 srom_repair(dev, broken);
4077
4078#ifdef CONFIG_PPC_PMAC
4079
4080
4081
4082
4083 if ( machine_is(powermac) &&
4084 (dev->dev_addr[0] == 0) &&
4085 (dev->dev_addr[1] == 0xa0) )
4086 {
4087 for (i = 0; i < ETH_ALEN; ++i)
4088 {
4089 int x = dev->dev_addr[i];
4090 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4091 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4092 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4093 }
4094 }
4095#endif
4096
4097
4098 status = test_bad_enet(dev, status);
4099
4100 return status;
4101}
4102
4103
4104
4105
4106static int
4107de4x5_bad_srom(struct de4x5_private *lp)
4108{
4109 int i, status = 0;
4110
4111 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4112 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4113 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4114 if (i == 0) {
4115 status = SMC;
4116 } else if (i == 1) {
4117 status = ACCTON;
4118 }
4119 break;
4120 }
4121 }
4122
4123 return status;
4124}
4125
4126static void
4127srom_repair(struct net_device *dev, int card)
4128{
4129 struct de4x5_private *lp = netdev_priv(dev);
4130
4131 switch(card) {
4132 case SMC:
4133 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4134 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4135 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4136 lp->useSROM = true;
4137 break;
4138 }
4139}
4140
4141
4142
4143
4144
4145static int
4146test_bad_enet(struct net_device *dev, int status)
4147{
4148 struct de4x5_private *lp = netdev_priv(dev);
4149 int i, tmp;
4150
4151 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4152 if ((tmp == 0) || (tmp == 0x5fa)) {
4153 if ((lp->chipset == last.chipset) &&
4154 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4155 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4156 for (i=ETH_ALEN-1; i>2; --i) {
4157 dev->dev_addr[i] += 1;
4158 if (dev->dev_addr[i] != 0) break;
4159 }
4160 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4161 if (!an_exception(lp)) {
4162 dev->irq = last.irq;
4163 }
4164
4165 status = 0;
4166 }
4167 } else if (!status) {
4168 last.chipset = lp->chipset;
4169 last.bus = lp->bus_num;
4170 last.irq = dev->irq;
4171 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4172 }
4173
4174 return status;
4175}
4176
4177
4178
4179
4180static int
4181an_exception(struct de4x5_private *lp)
4182{
4183 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4184 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4185 return -1;
4186 }
4187
4188 return 0;
4189}
4190
4191
4192
4193
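/*
 * Low level serial SROM access: srom_rd() bit-bangs a read command, the
 * 6-bit word address and then clocks 16 data bits out of the 93C46-style
 * serial EEPROM through the interface register passed in as 'addr'.
 */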
4194static short
4195srom_rd(u_long addr, u_char offset)
4196{
4197 sendto_srom(SROM_RD | SROM_SR, addr);
4198
4199 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4200 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4201 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4202
4203 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4204}
4205
4206static void
4207srom_latch(u_int command, u_long addr)
4208{
4209 sendto_srom(command, addr);
4210 sendto_srom(command | DT_CLK, addr);
4211 sendto_srom(command, addr);
4212}
4213
4214static void
4215srom_command(u_int command, u_long addr)
4216{
4217 srom_latch(command, addr);
4218 srom_latch(command, addr);
4219 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4220}
4221
4222static void
4223srom_address(u_int command, u_long addr, u_char offset)
4224{
4225 int i, a;
4226
4227 a = offset << 2;
4228 for (i=0; i<6; i++, a <<= 1) {
4229 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4230 }
4231 udelay(1);
4232
4233 i = (getfrom_srom(addr) >> 3) & 0x01;
4234}
4235
4236static short
4237srom_data(u_int command, u_long addr)
4238{
4239 int i;
4240 short word = 0;
4241 s32 tmp;
4242
4243 for (i=0; i<16; i++) {
4244 sendto_srom(command | DT_CLK, addr);
4245 tmp = getfrom_srom(addr);
4246 sendto_srom(command, addr);
4247
4248 word = (word << 1) | ((tmp >> 3) & 0x01);
4249 }
4250
4251 sendto_srom(command & 0x0000ff00, addr);
4252
4253 return word;
4254}
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270static void
4271sendto_srom(u_int command, u_long addr)
4272{
4273 outl(command, addr);
4274 udelay(1);
4275}
4276
4277static int
4278getfrom_srom(u_long addr)
4279{
4280 s32 tmp;
4281
4282 tmp = inl(addr);
4283 udelay(1);
4284
4285 return tmp;
4286}
4287
4288static int
4289srom_infoleaf_info(struct net_device *dev)
4290{
4291 struct de4x5_private *lp = netdev_priv(dev);
4292 int i, count;
4293 u_char *p;
4294
4295
4296 for (i=0; i<INFOLEAF_SIZE; i++) {
4297 if (lp->chipset == infoleaf_array[i].chipset) break;
4298 }
4299 if (i == INFOLEAF_SIZE) {
4300 lp->useSROM = false;
4301 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4302 dev->name);
4303 return -ENXIO;
4304 }
4305
4306 lp->infoleaf_fn = infoleaf_array[i].fn;
4307
4308
4309 count = *((u_char *)&lp->srom + 19);
4310 p = (u_char *)&lp->srom + 26;
4311
4312 if (count > 1) {
4313 for (i=count; i; --i, p+=3) {
4314 if (lp->device == *p) break;
4315 }
4316 if (i == 0) {
4317 lp->useSROM = false;
4318 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4319 dev->name, lp->device);
4320 return -ENXIO;
4321 }
4322 }
4323
4324 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4325
4326 return 0;
4327}
4328
4329
4330
4331
4332
4333
4334
4335
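/*
 * Walk the SROM info leaf once at start-up: set the DC21140 GEP control
 * word and run the reset/GEP sequences of any type 1, 3 or 5 info blocks;
 * the other block types are simply skipped over here.
 */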
4336static void
4337srom_init(struct net_device *dev)
4338{
4339 struct de4x5_private *lp = netdev_priv(dev);
4340 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4341 u_char count;
4342
4343 p+=2;
4344 if (lp->chipset == DC21140) {
4345 lp->cache.gepc = (*p++ | GEP_CTRL);
4346 gep_wr(lp->cache.gepc, dev);
4347 }
4348
4349
4350 count = *p++;
4351
4352
4353 for (;count; --count) {
4354 if (*p < 128) {
4355 p += COMPACT_LEN;
4356 } else if (*(p+1) == 5) {
4357 type5_infoblock(dev, 1, p);
4358 p += ((*p & BLOCK_LEN) + 1);
4359 } else if (*(p+1) == 4) {
4360 p += ((*p & BLOCK_LEN) + 1);
4361 } else if (*(p+1) == 3) {
4362 type3_infoblock(dev, 1, p);
4363 p += ((*p & BLOCK_LEN) + 1);
4364 } else if (*(p+1) == 2) {
4365 p += ((*p & BLOCK_LEN) + 1);
4366 } else if (*(p+1) == 1) {
4367 type1_infoblock(dev, 1, p);
4368 p += ((*p & BLOCK_LEN) + 1);
4369 } else {
4370 p += ((*p & BLOCK_LEN) + 1);
4371 }
4372 }
4373}
4374
4375
4376
4377
4378
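/*
 * Execute a GEP/reset sequence taken from the SROM: 'p' points at a length
 * byte followed by that many GEP writes (bytes on the DC21140 except for
 * type 5 blocks, 16-bit words otherwise), with 2ms between steps.
 */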
4379static void
4380srom_exec(struct net_device *dev, u_char *p)
4381{
4382 struct de4x5_private *lp = netdev_priv(dev);
4383 u_long iobase = dev->base_addr;
4384 u_char count = (p ? *p++ : 0);
4385 u_short *w = (u_short *)p;
4386
4387 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4388
4389 if (lp->chipset != DC21140) RESET_SIA;
4390
4391 while (count--) {
4392 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4393 *p++ : get_unaligned_le16(w++)), dev);
4394 mdelay(2);
4395 }
4396
4397 if (lp->chipset != DC21140) {
4398 outl(lp->cache.csr14, DE4X5_STRR);
4399 outl(lp->cache.csr13, DE4X5_SICR);
4400 }
4401}
4402
4403
4404
4405
4406
4407
4408static int
4409dc21041_infoleaf(struct net_device *dev)
4410{
4411 return DE4X5_AUTOSENSE_MS;
4412}
4413
4414static int
4415dc21140_infoleaf(struct net_device *dev)
4416{
4417 struct de4x5_private *lp = netdev_priv(dev);
4418 u_char count = 0;
4419 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4420 int next_tick = DE4X5_AUTOSENSE_MS;
4421
4422
4423 p+=2;
4424
4425
4426 lp->cache.gepc = (*p++ | GEP_CTRL);
4427
4428
4429 count = *p++;
4430
4431
4432 if (*p < 128) {
4433 next_tick = dc_infoblock[COMPACT](dev, count, p);
4434 } else {
4435 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4436 }
4437
4438 if (lp->tcount == count) {
4439 lp->media = NC;
4440 if (lp->media != lp->c_media) {
4441 de4x5_dbg_media(dev);
4442 lp->c_media = lp->media;
4443 }
4444 lp->media = INIT;
4445 lp->tcount = 0;
4446 lp->tx_enable = false;
4447 }
4448
4449 return next_tick & ~TIMER_CB;
4450}
4451
4452static int
4453dc21142_infoleaf(struct net_device *dev)
4454{
4455 struct de4x5_private *lp = netdev_priv(dev);
4456 u_char count = 0;
4457 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4458 int next_tick = DE4X5_AUTOSENSE_MS;
4459
4460
4461 p+=2;
4462
4463
4464 count = *p++;
4465
4466
4467 if (*p < 128) {
4468 next_tick = dc_infoblock[COMPACT](dev, count, p);
4469 } else {
4470 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4471 }
4472
4473 if (lp->tcount == count) {
4474 lp->media = NC;
4475 if (lp->media != lp->c_media) {
4476 de4x5_dbg_media(dev);
4477 lp->c_media = lp->media;
4478 }
4479 lp->media = INIT;
4480 lp->tcount = 0;
4481 lp->tx_enable = false;
4482 }
4483
4484 return next_tick & ~TIMER_CB;
4485}
4486
4487static int
4488dc21143_infoleaf(struct net_device *dev)
4489{
4490 struct de4x5_private *lp = netdev_priv(dev);
4491 u_char count = 0;
4492 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4493 int next_tick = DE4X5_AUTOSENSE_MS;
4494
4495
4496 p+=2;
4497
4498
4499 count = *p++;
4500
4501
4502 if (*p < 128) {
4503 next_tick = dc_infoblock[COMPACT](dev, count, p);
4504 } else {
4505 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4506 }
4507 if (lp->tcount == count) {
4508 lp->media = NC;
4509 if (lp->media != lp->c_media) {
4510 de4x5_dbg_media(dev);
4511 lp->c_media = lp->media;
4512 }
4513 lp->media = INIT;
4514 lp->tcount = 0;
4515 lp->tx_enable = false;
4516 }
4517
4518 return next_tick & ~TIMER_CB;
4519}
4520
4521
4522
4523
4524
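/*
 * compact_infoblock() and the type*_infoblock() handlers below all follow
 * the same pattern: skip along the chain until the block selected by
 * lp->tcount is reached, then (on the first pass for this medium) decode its
 * media code, GEP/CSR values and autosense bits before handing over to the
 * appropriate autoconf state machine.
 */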
4525static int
4526compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4527{
4528 struct de4x5_private *lp = netdev_priv(dev);
4529 u_char flags, csr6;
4530
4531
4532 if (--count > lp->tcount) {
4533 if (*(p+COMPACT_LEN) < 128) {
4534 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4535 } else {
4536 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4537 }
4538 }
4539
4540 if ((lp->media == INIT) && (lp->timeout < 0)) {
4541 lp->ibn = COMPACT;
4542 lp->active = 0;
4543 gep_wr(lp->cache.gepc, dev);
4544 lp->infoblock_media = (*p++) & COMPACT_MC;
4545 lp->cache.gep = *p++;
4546 csr6 = *p++;
4547 flags = *p++;
4548
4549 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4550 lp->defMedium = (flags & 0x40) ? -1 : 0;
4551 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4552 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4553 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4554 lp->useMII = false;
4555
4556 de4x5_switch_mac_port(dev);
4557 }
4558
4559 return dc21140m_autoconf(dev);
4560}
4561
4562
4563
4564
4565static int
4566type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4567{
4568 struct de4x5_private *lp = netdev_priv(dev);
4569 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4570
4571
4572 if (--count > lp->tcount) {
4573 if (*(p+len) < 128) {
4574 return dc_infoblock[COMPACT](dev, count, p+len);
4575 } else {
4576 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4577 }
4578 }
4579
4580 if ((lp->media == INIT) && (lp->timeout < 0)) {
4581 lp->ibn = 0;
4582 lp->active = 0;
4583 gep_wr(lp->cache.gepc, dev);
4584 p+=2;
4585 lp->infoblock_media = (*p++) & BLOCK0_MC;
4586 lp->cache.gep = *p++;
4587 csr6 = *p++;
4588 flags = *p++;
4589
4590 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4591 lp->defMedium = (flags & 0x40) ? -1 : 0;
4592 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4593 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4594 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4595 lp->useMII = false;
4596
4597 de4x5_switch_mac_port(dev);
4598 }
4599
4600 return dc21140m_autoconf(dev);
4601}
4602
4603
4604
4605static int
4606type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4607{
4608 struct de4x5_private *lp = netdev_priv(dev);
4609 u_char len = (*p & BLOCK_LEN)+1;
4610
4611
4612 if (--count > lp->tcount) {
4613 if (*(p+len) < 128) {
4614 return dc_infoblock[COMPACT](dev, count, p+len);
4615 } else {
4616 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4617 }
4618 }
4619
4620 p += 2;
4621 if (lp->state == INITIALISED) {
4622 lp->ibn = 1;
4623 lp->active = *p++;
4624 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4625 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4626 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4627 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4628 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4629 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4630 return 0;
4631 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4632 lp->ibn = 1;
4633 lp->active = *p;
4634 lp->infoblock_csr6 = OMR_MII_100;
4635 lp->useMII = true;
4636 lp->infoblock_media = ANS;
4637
4638 de4x5_switch_mac_port(dev);
4639 }
4640
4641 return dc21140m_autoconf(dev);
4642}
4643
4644static int
4645type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4646{
4647 struct de4x5_private *lp = netdev_priv(dev);
4648 u_char len = (*p & BLOCK_LEN)+1;
4649
4650
4651 if (--count > lp->tcount) {
4652 if (*(p+len) < 128) {
4653 return dc_infoblock[COMPACT](dev, count, p+len);
4654 } else {
4655 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4656 }
4657 }
4658
4659 if ((lp->media == INIT) && (lp->timeout < 0)) {
4660 lp->ibn = 2;
4661 lp->active = 0;
4662 p += 2;
4663 lp->infoblock_media = (*p) & MEDIA_CODE;
4664
4665 if ((*p++) & EXT_FIELD) {
4666 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4667 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4668 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4669 } else {
4670 lp->cache.csr13 = CSR13;
4671 lp->cache.csr14 = CSR14;
4672 lp->cache.csr15 = CSR15;
4673 }
4674 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4675 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4676 lp->infoblock_csr6 = OMR_SIA;
4677 lp->useMII = false;
4678
4679 de4x5_switch_mac_port(dev);
4680 }
4681
4682 return dc2114x_autoconf(dev);
4683}
4684
4685static int
4686type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4687{
4688 struct de4x5_private *lp = netdev_priv(dev);
4689 u_char len = (*p & BLOCK_LEN)+1;
4690
4691
4692 if (--count > lp->tcount) {
4693 if (*(p+len) < 128) {
4694 return dc_infoblock[COMPACT](dev, count, p+len);
4695 } else {
4696 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4697 }
4698 }
4699
4700 p += 2;
4701 if (lp->state == INITIALISED) {
4702 lp->ibn = 3;
4703 lp->active = *p++;
4704 if (MOTO_SROM_BUG) lp->active = 0;
4705 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4706 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4707 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4708 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4709 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4710 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4711 lp->phy[lp->active].mci = *p;
4712 return 0;
4713 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4714 lp->ibn = 3;
4715 lp->active = *p;
4716 if (MOTO_SROM_BUG) lp->active = 0;
4717 lp->infoblock_csr6 = OMR_MII_100;
4718 lp->useMII = true;
4719 lp->infoblock_media = ANS;
4720
4721 de4x5_switch_mac_port(dev);
4722 }
4723
4724 return dc2114x_autoconf(dev);
4725}
4726
4727static int
4728type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4729{
4730 struct de4x5_private *lp = netdev_priv(dev);
4731 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4732
4733
4734 if (--count > lp->tcount) {
4735 if (*(p+len) < 128) {
4736 return dc_infoblock[COMPACT](dev, count, p+len);
4737 } else {
4738 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4739 }
4740 }
4741
4742 if ((lp->media == INIT) && (lp->timeout < 0)) {
4743 lp->ibn = 4;
4744 lp->active = 0;
4745 p+=2;
4746 lp->infoblock_media = (*p++) & MEDIA_CODE;
4747 lp->cache.csr13 = CSR13;
4748 lp->cache.csr14 = CSR14;
4749 lp->cache.csr15 = CSR15;
4750 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4751 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4752 csr6 = *p++;
4753 flags = *p++;
4754
4755 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4756 lp->defMedium = (flags & 0x40) ? -1 : 0;
4757 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4758 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4759 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4760 lp->useMII = false;
4761
4762 de4x5_switch_mac_port(dev);
4763 }
4764
4765 return dc2114x_autoconf(dev);
4766}
4767
4768
4769
4770
4771
4772static int
4773type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4774{
4775 struct de4x5_private *lp = netdev_priv(dev);
4776 u_char len = (*p & BLOCK_LEN)+1;
4777
4778
4779 if (--count > lp->tcount) {
4780 if (*(p+len) < 128) {
4781 return dc_infoblock[COMPACT](dev, count, p+len);
4782 } else {
4783 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4784 }
4785 }
4786
4787
4788 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4789 p+=2;
4790 lp->rst = p;
4791 srom_exec(dev, lp->rst);
4792 }
4793
4794 return DE4X5_AUTOSENSE_MS;
4795}
4796
4797
4798
4799
4800
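/*
 * MDIO bit-banging through the DE4X5_MII register: mii_rd()/mii_wr() clock
 * out a preamble, the start/opcode bits, the PHY and register addresses and
 * a turnaround, then transfer the 16-bit data word one bit at a time with
 * sendto_mii()/getfrom_mii().
 */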
4801static int
4802mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4803{
4804 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4805 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4806 mii_wdata(MII_STRD, 4, ioaddr);
4807 mii_address(phyaddr, ioaddr);
4808 mii_address(phyreg, ioaddr);
4809 mii_ta(MII_STRD, ioaddr);
4810
4811 return mii_rdata(ioaddr);
4812}
4813
4814static void
4815mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4816{
4817 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4818 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4819 mii_wdata(MII_STWR, 4, ioaddr);
4820 mii_address(phyaddr, ioaddr);
4821 mii_address(phyreg, ioaddr);
4822 mii_ta(MII_STWR, ioaddr);
4823 data = mii_swap(data, 16);
4824 mii_wdata(data, 16, ioaddr);
4825}
4826
4827static int
4828mii_rdata(u_long ioaddr)
4829{
4830 int i;
4831 s32 tmp = 0;
4832
4833 for (i=0; i<16; i++) {
4834 tmp <<= 1;
4835 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4836 }
4837
4838 return tmp;
4839}
4840
4841static void
4842mii_wdata(int data, int len, u_long ioaddr)
4843{
4844 int i;
4845
4846 for (i=0; i<len; i++) {
4847 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4848 data >>= 1;
4849 }
4850}
4851
4852static void
4853mii_address(u_char addr, u_long ioaddr)
4854{
4855 int i;
4856
4857 addr = mii_swap(addr, 5);
4858 for (i=0; i<5; i++) {
4859 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4860 addr >>= 1;
4861 }
4862}
4863
4864static void
4865mii_ta(u_long rw, u_long ioaddr)
4866{
4867 if (rw == MII_STWR) {
4868 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4869 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4870 } else {
4871 getfrom_mii(MII_MRD | MII_RD, ioaddr);
4872 }
4873}
4874
4875static int
4876mii_swap(int data, int len)
4877{
4878 int i, tmp = 0;
4879
4880 for (i=0; i<len; i++) {
4881 tmp <<= 1;
4882 tmp |= (data & 1);
4883 data >>= 1;
4884 }
4885
4886 return tmp;
4887}
4888
4889static void
4890sendto_mii(u32 command, int data, u_long ioaddr)
4891{
4892 u32 j;
4893
4894 j = (data & 1) << 17;
4895 outl(command | j, ioaddr);
4896 udelay(1);
4897 outl(command | MII_MDC | j, ioaddr);
4898 udelay(1);
4899}
4900
4901static int
4902getfrom_mii(u32 command, u_long ioaddr)
4903{
4904 outl(command, ioaddr);
4905 udelay(1);
4906 outl(command | MII_MDC, ioaddr);
4907 udelay(1);
4908
4909 return (inl(ioaddr) >> 19) & 1;
4910}
4911
4912
4913
4914
4915static int
4916mii_get_oui(u_char phyaddr, u_long ioaddr)
4917{
    int r2, r3;

    /* Read both PHY identifier registers. Despite the name, the OUI is
     * never actually reassembled from the two ID words here: callers only
     * use the raw MII_ID0 value as a "PHY present and recognisable" check,
     * so r3 (MII_ID1) is read to complete the pair but is otherwise unused.
     */
    r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
    r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
4956 return r2;
4957}
4958
4959
4960
4961
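/*
 * Scan all possible MII addresses for PHYs, matching anything found against
 * the known devices in phy_info[] and falling back to a generic entry (with
 * a request for a debug dump) for unrecognised IDs. Each discovered PHY is
 * then reset. Returns the number of PHYs found.
 */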
4962static int
4963mii_get_phy(struct net_device *dev)
4964{
4965 struct de4x5_private *lp = netdev_priv(dev);
4966 u_long iobase = dev->base_addr;
4967 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4968 int id;
4969
4970 lp->active = 0;
4971 lp->useMII = true;
4972
4973
4974 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4975 lp->phy[lp->active].addr = i;
4976 if (i==0) n++;
4977 while (de4x5_reset_phy(dev)<0) udelay(100);
4978 id = mii_get_oui(i, DE4X5_MII);
4979 if ((id == 0) || (id == 65535)) continue;
4980 for (j=0; j<limit; j++) {
4981 if (id != phy_info[j].id) continue;
4982 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4983 if (k < DE4X5_MAX_PHY) {
4984 memcpy((char *)&lp->phy[k],
4985 (char *)&phy_info[j], sizeof(struct phy_table));
4986 lp->phy[k].addr = i;
4987 lp->mii_cnt++;
4988 lp->active++;
4989 } else {
4990 goto purgatory;
4991 }
4992 break;
4993 }
4994 if ((j == limit) && (i < DE4X5_MAX_MII)) {
4995 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4996 lp->phy[k].addr = i;
4997 lp->phy[k].id = id;
4998 lp->phy[k].spd.reg = GENERIC_REG;
4999 lp->phy[k].spd.mask = GENERIC_MASK;
5000 lp->phy[k].spd.value = GENERIC_VALUE;
5001 lp->mii_cnt++;
5002 lp->active++;
5003 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5004 j = de4x5_debug;
5005 de4x5_debug |= DEBUG_MII;
5006 de4x5_dbg_mii(dev, k);
5007 de4x5_debug = j;
5008 printk("\n");
5009 }
5010 }
5011 purgatory:
5012 lp->active = 0;
5013 if (lp->phy[0].id) {
5014 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
5015 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5016 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5017
5018 de4x5_dbg_mii(dev, k);
5019 }
5020 }
5021 if (!lp->mii_cnt) lp->useMII = false;
5022
5023 return lp->mii_cnt;
5024}
5025
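/*
 * Build the setup frame: in hash filtering mode the station address goes in
 * the imperfect-address slot and the broadcast bit is set in the hash table;
 * in perfect filtering mode both our own address and the broadcast address
 * are written as explicit entries, 16 bits per longword as the chip expects.
 */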
5026static char *
5027build_setup_frame(struct net_device *dev, int mode)
5028{
5029 struct de4x5_private *lp = netdev_priv(dev);
5030 int i;
5031 char *pa = lp->setup_frame;
5032
5033
5034 if (mode == ALL) {
5035 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5036 }
5037
5038 if (lp->setup_f == HASH_PERF) {
5039 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5040 *(pa + i) = dev->dev_addr[i];
5041 if (i & 0x01) pa += 2;
5042 }
5043 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
5044 } else {
5045 for (i=0; i<ETH_ALEN; i++) {
5046 *(pa + (i&1)) = dev->dev_addr[i];
5047 if (i & 0x01) pa += 4;
5048 }
5049 for (i=0; i<ETH_ALEN; i++) {
5050 *(pa + (i&1)) = (char) 0xff;
5051 if (i & 0x01) pa += 4;
5052 }
5053 }
5054
5055 return pa;
5056}
5057
5058static void
5059disable_ast(struct net_device *dev)
5060{
5061 struct de4x5_private *lp = netdev_priv(dev);
5062 del_timer_sync(&lp->timer);
5063}
5064
5065static long
5066de4x5_switch_mac_port(struct net_device *dev)
5067{
5068 struct de4x5_private *lp = netdev_priv(dev);
5069 u_long iobase = dev->base_addr;
5070 s32 omr;
5071
5072 STOP_DE4X5;
5073
5074
5075 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5076 OMR_FDX));
5077 omr |= lp->infoblock_csr6;
5078 if (omr & OMR_PS) omr |= OMR_HBD;
5079 outl(omr, DE4X5_OMR);
5080
5081
5082 RESET_DE4X5;
5083
5084
5085 if (lp->chipset == DC21140) {
5086 gep_wr(lp->cache.gepc, dev);
5087 gep_wr(lp->cache.gep, dev);
5088 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5089 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5090 }
5091
5092
5093 outl(omr, DE4X5_OMR);
5094
5095
5096 inl(DE4X5_MFC);
5097
5098 return omr;
5099}
5100
5101static void
5102gep_wr(s32 data, struct net_device *dev)
5103{
5104 struct de4x5_private *lp = netdev_priv(dev);
5105 u_long iobase = dev->base_addr;
5106
5107 if (lp->chipset == DC21140) {
5108 outl(data, DE4X5_GEP);
5109 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5110 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5111 }
5112}
5113
5114static int
5115gep_rd(struct net_device *dev)
5116{
5117 struct de4x5_private *lp = netdev_priv(dev);
5118 u_long iobase = dev->base_addr;
5119
5120 if (lp->chipset == DC21140) {
5121 return inl(DE4X5_GEP);
5122 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5123 return inl(DE4X5_SIGR) & 0x000fffff;
5124 }
5125
5126 return 0;
5127}
5128
5129static void
5130yawn(struct net_device *dev, int state)
5131{
5132 struct de4x5_private *lp = netdev_priv(dev);
5133 u_long iobase = dev->base_addr;
5134
5135 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5136
5137 if(lp->bus == EISA) {
5138 switch(state) {
5139 case WAKEUP:
5140 outb(WAKEUP, PCI_CFPM);
5141 mdelay(10);
5142 break;
5143
5144 case SNOOZE:
5145 outb(SNOOZE, PCI_CFPM);
5146 break;
5147
5148 case SLEEP:
5149 outl(0, DE4X5_SICR);
5150 outb(SLEEP, PCI_CFPM);
5151 break;
5152 }
5153 } else {
5154 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5155 switch(state) {
5156 case WAKEUP:
5157 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5158 mdelay(10);
5159 break;
5160
5161 case SNOOZE:
5162 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5163 break;
5164
5165 case SLEEP:
5166 outl(0, DE4X5_SICR);
5167 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5168 break;
5169 }
5170 }
5171}
5172
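/*
 * Parse the per-interface options in the 'args' string: everything between
 * this interface's name and the next "eth..." token is scanned for "fdx" and
 * an "autosense" medium. As an illustration (exact option syntax assumed,
 * only the substrings matter to this parser), something like
 *
 *     args="eth0:fdx autosense=TP eth1:autosense=100Mb"
 *
 * would request full duplex TP on eth0 and 100Mb on eth1.
 */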
static void
de4x5_parse_params(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    char *p, *q, t;

    lp->params.fdx = false;
    lp->params.autosense = AUTO;

    if (args == NULL) return;

    if ((p = strstr(args, dev->name))) {
	if (!(q = strstr(p + strlen(dev->name), "eth"))) q = p + strlen(p);
	t = *q;
	*q = '\0';

	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;

	if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
	    if (strstr(p, "TP_NW")) {
		lp->params.autosense = TP_NW;
	    } else if (strstr(p, "TP")) {
		lp->params.autosense = TP;
	    } else if (strstr(p, "BNC_AUI")) {
		lp->params.autosense = BNC;
	    } else if (strstr(p, "BNC")) {
		lp->params.autosense = BNC;
	    } else if (strstr(p, "AUI")) {
		lp->params.autosense = AUI;
	    } else if (strstr(p, "10Mb")) {
		lp->params.autosense = _10Mb;
	    } else if (strstr(p, "100Mb")) {
		lp->params.autosense = _100Mb;
	    } else if (strstr(p, "AUTO")) {
		lp->params.autosense = AUTO;
	    }
	}
	*q = t;
    }
}

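/*
** Debug dump at open time: print the IRQ, MAC address and the descriptor
** ring and buffer addresses when DEBUG_OPEN is enabled.
*/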
static void
de4x5_dbg_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;

    if (de4x5_debug & DEBUG_OPEN) {
	printk("%s: de4x5 opening with irq %d\n", dev->name, dev->irq);
	printk("\tphysical address: %pM\n", dev->dev_addr);
	printk("Descriptor head addresses:\n");
	printk("\t0x%8.8lx 0x%8.8lx\n", (u_long)lp->rx_ring, (u_long)lp->tx_ring);
	printk("Descriptor addresses:\nRX: ");
	for (i = 0; i < lp->rxRingSize - 1; i++) {
	    if (i < 3) {
		printk("0x%8.8lx ", (u_long)&lp->rx_ring[i].status);
	    }
	}
	printk("...0x%8.8lx\n", (u_long)&lp->rx_ring[i].status);
	printk("TX: ");
	for (i = 0; i < lp->txRingSize - 1; i++) {
	    if (i < 3) {
		printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
	    }
	}
	printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
	printk("Descriptor buffers:\nRX: ");
	for (i = 0; i < lp->rxRingSize - 1; i++) {
	    if (i < 3) {
		printk("0x%8.8x ", le32_to_cpu(lp->rx_ring[i].buf));
	    }
	}
	printk("...0x%8.8x\n", le32_to_cpu(lp->rx_ring[i].buf));
	printk("TX: ");
	for (i = 0; i < lp->txRingSize - 1; i++) {
	    if (i < 3) {
		printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
	    }
	}
	printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
	printk("Ring size:\nRX: %d\nTX: %d\n",
	       (short)lp->rxRingSize,
	       (short)lp->txRingSize);
    }
}

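/*
** Dump the MII registers of PHY 'k' when DEBUG_MII is enabled.
*/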
static void
de4x5_dbg_mii(struct net_device *dev, int k)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (de4x5_debug & DEBUG_MII) {
	printk("\nMII device address: %d\n", lp->phy[k].addr);
	printk("MII CR: %x\n", mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII));
	printk("MII SR: %x\n", mii_rd(MII_SR, lp->phy[k].addr, DE4X5_MII));
	printk("MII ID0: %x\n", mii_rd(MII_ID0, lp->phy[k].addr, DE4X5_MII));
	printk("MII ID1: %x\n", mii_rd(MII_ID1, lp->phy[k].addr, DE4X5_MII));
	if (lp->phy[k].id != BROADCOM_T4) {
	    printk("MII ANA: %x\n", mii_rd(0x04, lp->phy[k].addr, DE4X5_MII));
	    printk("MII ANC: %x\n", mii_rd(0x05, lp->phy[k].addr, DE4X5_MII));
	}
	printk("MII 16: %x\n", mii_rd(0x10, lp->phy[k].addr, DE4X5_MII));
	if (lp->phy[k].id != BROADCOM_T4) {
	    printk("MII 17: %x\n", mii_rd(0x11, lp->phy[k].addr, DE4X5_MII));
	    printk("MII 18: %x\n", mii_rd(0x12, lp->phy[k].addr, DE4X5_MII));
	} else {
	    printk("MII 20: %x\n", mii_rd(0x14, lp->phy[k].addr, DE4X5_MII));
	}
    }
}

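/*
** Report a media change when DEBUG_MEDIA is enabled, and remember the
** current media so the message is only printed when it changes.
*/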
static void
de4x5_dbg_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->media != lp->c_media) {
	if (de4x5_debug & DEBUG_MEDIA) {
	    printk("%s: media is %s%s\n", dev->name,
		   (lp->media == NC  ? "unconnected, link down or incompatible connection" :
		   (lp->media == TP  ? "TP" :
		   (lp->media == ANS ? "TP/Nway" :
		   (lp->media == BNC ? "BNC" :
		   (lp->media == AUI ? "AUI" :
		   (lp->media == BNC_AUI ? "BNC/AUI" :
		   (lp->media == EXT_SIA ? "EXT SIA" :
		   (lp->media == _100Mb  ? "100Mb/s" :
		   (lp->media == _10Mb   ? "10Mb/s" :
		   "???"))))))))), (lp->fdx ? " full duplex." : "."));
	}
	lp->c_media = lp->media;
    }
}

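/*
** Dump the SROM contents when DEBUG_SROM is enabled.
*/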
static void
de4x5_dbg_srom(struct de4x5_srom *p)
{
    int i;

    if (de4x5_debug & DEBUG_SROM) {
	printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
	printk("Sub-system ID:        %04x\n", *((u_short *)p->sub_system_id));
	printk("ID Block CRC:         %02x\n", (u_char)(p->id_block_crc));
	printk("SROM version:         %02x\n", (u_char)(p->version));
	printk("# controllers:        %02x\n", (u_char)(p->num_controllers));

	printk("Hardware Address:     %pM\n", p->ieee_addr);
	printk("CRC checksum:         %04x\n", (u_short)(p->chksum));
	for (i = 0; i < 64; i++) {
	    printk("%3d %04x\n", i << 1, (u_short)*((u_short *)p + i));
	}
    }
}

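/*
** Dump a received frame's addresses, length/SAP and payload bytes when
** DEBUG_RX is enabled.
*/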
static void
de4x5_dbg_rx(struct sk_buff *skb, int len)
{
    int i, j;

    if (de4x5_debug & DEBUG_RX) {
	printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
	       skb->data, &skb->data[6],
	       (u_char)skb->data[12],
	       (u_char)skb->data[13],
	       len);
	for (j = 0; len > 0; j += 16, len -= 16) {
	    printk(" %03x: ", j);
	    for (i = 0; i < 16 && i < len; i++) {
		printk("%02x ", (u_char)skb->data[i + j]);
	    }
	    printk("\n");
	}
    }
}

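/*
** Private ioctl interface. Several operations are privileged and check
** CAP_NET_ADMIN. The caller overlays a struct de4x5_ioctl on ifr_ifru and
** issues a private device ioctl on a socket bound to this interface. The
** snippet below is an illustrative sketch only: 'sock' and 'buf' are
** placeholders, and SIOCDEVPRIVATE is assumed to be the request used to
** reach this handler.
**
**    struct ifreq ifr;
**    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
**
**    strcpy(ifr.ifr_name, "eth0");
**    ioc->cmd  = DE4X5_GET_HWADDR;
**    ioc->data = buf;                  /* at least ETH_ALEN bytes */
**    ioctl(sock, SIOCDEVPRIVATE, &ifr);
*/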
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
	u8  addr[144];
	u16 sval[72];
	u32 lval[36];
    } tmp;
    u_long flags = 0;

    switch (ioc->cmd) {
    case DE4X5_GET_HWADDR:           /* Get the hardware address */
	ioc->len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
	    tmp.addr[i] = dev->dev_addr[i];
	}
	if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
	break;

    case DE4X5_SET_HWADDR:           /* Set the hardware address */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
	if (netif_queue_stopped(dev))
	    return -EBUSY;
	netif_stop_queue(dev);
	for (i = 0; i < ETH_ALEN; i++) {
	    dev->dev_addr[i] = tmp.addr[i];
	}
	build_setup_frame(dev, PHYS_ADDR_ONLY);
	/* Set up the descriptor and give ownership to the card */
	load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
		    SETUP_FRAME_LEN, (struct sk_buff *)1);
	lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
	outl(POLL_DEMAND, DE4X5_TPD);           /* Start the TX */
	netif_wake_queue(dev);
	break;

    case DE4X5_SAY_BOO:              /* Say "Boo!" to the kernel log file */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	printk("%s: Boo!\n", dev->name);
	break;

    case DE4X5_MCA_EN:               /* Enable pass-all multicast addressing */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	omr = inl(DE4X5_OMR);
	omr |= OMR_PM;
	outl(omr, DE4X5_OMR);
	break;

    case DE4X5_GET_STATS:            /* Get the driver statistics */
    {
	struct pkt_stats statbuf;

	ioc->len = sizeof(statbuf);
	spin_lock_irqsave(&lp->lock, flags);
	memcpy(&statbuf, &lp->pktStats, ioc->len);
	spin_unlock_irqrestore(&lp->lock, flags);
	if (copy_to_user(ioc->data, &statbuf, ioc->len))
	    return -EFAULT;
	break;
    }
    case DE4X5_CLR_STATS:            /* Zero out the driver statistics */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	spin_lock_irqsave(&lp->lock, flags);
	memset(&lp->pktStats, 0, sizeof(lp->pktStats));
	spin_unlock_irqrestore(&lp->lock, flags);
	break;

    case DE4X5_GET_OMR:              /* Get the OMR register contents */
	tmp.addr[0] = inl(DE4X5_OMR);
	if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
	break;

    case DE4X5_SET_OMR:              /* Set the OMR register contents */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
	outl(tmp.addr[0], DE4X5_OMR);
	break;

    case DE4X5_GET_REG:              /* Get the DE4X5 registers */
	j = 0;
	tmp.lval[0] = inl(DE4X5_STS);  j += 4;
	tmp.lval[1] = inl(DE4X5_BMR);  j += 4;
	tmp.lval[2] = inl(DE4X5_IMR);  j += 4;
	tmp.lval[3] = inl(DE4X5_OMR);  j += 4;
	tmp.lval[4] = inl(DE4X5_SISR); j += 4;
	tmp.lval[5] = inl(DE4X5_SICR); j += 4;
	tmp.lval[6] = inl(DE4X5_STRR); j += 4;
	tmp.lval[7] = inl(DE4X5_SIGR); j += 4;
	ioc->len = j;
	if (copy_to_user(ioc->data, tmp.lval, ioc->len))
	    return -EFAULT;
	break;

#define DE4X5_DUMP 0x0f
    default:
	return -EOPNOTSUPP;
    }

    return status;
}

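/*
** Module entry points: register the PCI and/or EISA drivers depending on
** the kernel configuration, and unregister them on module removal.
*/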
static int __init de4x5_module_init(void)
{
    int err = 0;

#ifdef CONFIG_PCI
    err = pci_register_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    err |= eisa_driver_register(&de4x5_eisa_driver);
#endif

    return err;
}

static void __exit de4x5_module_exit(void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    eisa_driver_unregister(&de4x5_eisa_driver);
#endif
}

module_init(de4x5_module_init);
module_exit(de4x5_module_exit);