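/*
** de4x5.c: A DIGITAL DE425/DE434/DE435/DE450/DE500 (DC21040, DC21041,
** DC21140, DC2114x) EISA/PCI Ethernet driver for Linux.
*/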
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/eisa.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#endif

#include "de4x5.h"

static const char version[] =
    KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";

#define c_char const char

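/*
** MII PHY data: phy_table describes a known PHY, mii_phy is the per-device
** working copy (address, GEP/reset sequences, capabilities), and sia_phy
** holds the SIA CSR13-15 and general purpose register values.
*/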
struct phy_table {
    int reset;
    int id;
    int ta;
    struct {
        int reg;
        int mask;
        int value;
    } spd;
};

struct mii_phy {
    int reset;
    int id;
    int ta;
    struct {
        int reg;
        int mask;
        int value;
    } spd;
    int addr;
    u_char *gep;
    u_char *rst;
    u_int mc;
    u_int ana;
    u_int fdx;
    u_int ttm;
    u_int mci;
};

#define DE4X5_MAX_PHY 8

struct sia_phy {
    u_char mc;
    u_char ext;
    int csr13;
    int csr14;
    int csr15;
    int gepc;
    int gep;
};

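/*
** The PHYs this driver recognises; GENERIC_REG/MASK/VALUE below are used
** as a fallback speed test for PHYs that are not in this table.
*/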
static struct phy_table phy_info[] = {
    {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
    {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
    {0, SEEQ_T4,     1, {0x12, 0x10, 0x10}},
    {0, CYPRESS_T4,  1, {0x05, 0x20, 0x20}},
    {0, 0x7810,      1, {0x14, 0x0800, 0x0800}}
};

#define GENERIC_REG   0x05
#define GENERIC_MASK  MII_ANLPA_100M
#define GENERIC_VALUE MII_ANLPA_100M

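/*
** Ethernet address prefixes (SMC, Accton) of adapters known to need their
** SROM contents repaired before the address and media info can be used.
*/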
static c_char enet_det[][ETH_ALEN] = {
    {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
    {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
};

#define SMC    1
#define ACCTON 2

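/*
** Canned SROM media information block used by srom_repair() for the
** adapters detected via enet_det[] above.
*/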
static c_char srom_repair_info[][100] = {
    {0x00,0x1e,0x00,0x00,0x00,0x08,
     0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
     0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
     0x00,0x18,}
};

#ifdef DE4X5_DEBUG
static int de4x5_debug = DE4X5_DEBUG;
#else
static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
#endif

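/*
** DE4X5_PARM can be defined at compile time to supply a default for the
** 'args' module parameter (per-interface full duplex and autosense
** settings), parsed by de4x5_parse_params().
*/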
#ifdef DE4X5_PARM
static char *args = DE4X5_PARM;
#else
static char *args;
#endif

struct parameters {
    bool fdx;
    int autosense;
};

#define DE4X5_AUTOSENSE_MS 250

#define DE4X5_NDA 0xffe0

#define PROBE_LENGTH 32
#define ETH_PROM_SIG 0xAA5500FFUL

#define PKT_BUF_SZ 1536
#define IEEE802_3_SZ 1518
#define MAX_PKT_SZ 1514
#define MAX_DAT_SZ 1500
#define MIN_DAT_SZ 1
#define PKT_HDR_LEN 14
#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
#define QUEUE_PKT_TIMEOUT (3*HZ)

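/*
** EISA bus defines
*/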
#define DE4X5_EISA_IO_PORTS 0x0c00
#define DE4X5_EISA_TOTAL_SIZE 0x100

#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}

#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
#define DE4X5_NAME_LENGTH 8

static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;

#define PROBE_LENGTH 32
#define ETH_PROM_SIG 0xAA5500FFUL

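/*
** PCI bus defines
*/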
#define PCI_MAX_BUS_NUM 8
#define DE4X5_PCI_TOTAL_SIZE 0x80
#define DE4X5_CLASS_CODE 0x00020000

#define DE4X5_ALIGN4 ((u_long)4 - 1)
#define DE4X5_ALIGN8 ((u_long)8 - 1)
#define DE4X5_ALIGN16 ((u_long)16 - 1)
#define DE4X5_ALIGN32 ((u_long)32 - 1)
#define DE4X5_ALIGN64 ((u_long)64 - 1)
#define DE4X5_ALIGN128 ((u_long)128 - 1)

#define DE4X5_ALIGN DE4X5_ALIGN32
#define DE4X5_CACHE_ALIGN CAL_16LONG
#define DESC_SKIP_LEN DSL_0

#define DESC_ALIGN

#ifndef DEC_ONLY
static int dec_only;
#else
static int dec_only = 1;
#endif

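/*
** Interrupt mask and transmit/receive start/stop helper macros. They expect
** 'imr', 'omr' and 'iobase' to be in scope at the point of use.
*/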
#define ENABLE_IRQs { \
    imr |= lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define DISABLE_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define UNMASK_IRQs {\
    imr |= lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define MASK_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define START_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr |= OMR_ST | OMR_SR;\
    outl(omr, DE4X5_OMR); \
}

#define STOP_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr &= ~(OMR_ST|OMR_SR);\
    outl(omr, DE4X5_OMR); \
}

#define RESET_SIA outl(0, DE4X5_SICR);

#define DE4X5_AUTOSENSE_MS 250

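/*
** SROM image layout as read from the serial EEPROM.
*/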
struct de4x5_srom {
    char sub_vendor_id[2];
    char sub_system_id[2];
    char reserved[12];
    char id_block_crc;
    char reserved2;
    char version;
    char num_controllers;
    char ieee_addr[6];
    char info[100];
    short chksum;
};
#define SUB_VENDOR_ID 0x500a

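/*
** Receive and transmit descriptor ring sizes and the per-buffer receive
** size; RX_BUFF_SZ must hold a maximum-length Ethernet frame.
*/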
#define NUM_RX_DESC 8
#define NUM_TX_DESC 32
#define RX_BUFF_SZ 1536

struct de4x5_desc {
    volatile __le32 status;
    __le32 des1;
    __le32 buf;
    __le32 next;
    DESC_ALIGN
};

#define DE4X5_PKT_STAT_SZ 16
#define DE4X5_PKT_BIN_SZ 128

struct pkt_stats {
    u_int bins[DE4X5_PKT_STAT_SZ];
    u_int unicast;
    u_int multicast;
    u_int broadcast;
    u_int excessive_collisions;
    u_int tx_underruns;
    u_int excessive_underruns;
    u_int rx_runt_frames;
    u_int rx_collision;
    u_int rx_dribble;
    u_int rx_overflow;
};

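/*
** The DE4X5 private structure, one instance per adapter.
*/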
struct de4x5_private {
    char adapter_name[80];
    u_long interrupt;
    struct de4x5_desc *rx_ring;
    struct de4x5_desc *tx_ring;
    struct sk_buff *tx_skb[NUM_TX_DESC];
    struct sk_buff *rx_skb[NUM_RX_DESC];
    int rx_new, rx_old;
    int tx_new, tx_old;
    char setup_frame[SETUP_FRAME_LEN];
    char frame[64];
    spinlock_t lock;
    struct net_device_stats stats;
    struct pkt_stats pktStats;
    char rxRingSize;
    char txRingSize;
    int bus;
    int bus_num;
    int device;
    int state;
    int chipset;
    s32 irq_mask;
    s32 irq_en;
    int media;
    int c_media;
    bool fdx;
    int linkOK;
    int autosense;
    bool tx_enable;
    int setup_f;
    int local_state;
    struct mii_phy phy[DE4X5_MAX_PHY];
    struct sia_phy sia;
    int active;
    int mii_cnt;
    int timeout;
    struct timer_list timer;
    int tmp;
    struct {
        u_long lock;
        s32 csr0;
        s32 csr6;
        s32 csr7;
        s32 gep;
        s32 gepc;
        s32 csr13;
        s32 csr14;
        s32 csr15;
        int save_cnt;
        struct sk_buff_head queue;
    } cache;
    struct de4x5_srom srom;
    int cfrv;
    int rx_ovf;
    bool useSROM;
    bool useMII;
    int asBitValid;
    int asPolarity;
    int asBit;
    int defMedium;
    int tcount;
    int infoblock_init;
    int infoleaf_offset;
    s32 infoblock_csr6;
    int infoblock_media;
    int (*infoleaf_fn)(struct net_device *);
    u_char *rst;
    u_char ibn;
    struct parameters params;
    struct device *gendev;
    dma_addr_t dma_rings;
    int dma_size;
    char *rx_bufs;
};

static struct {
    int chipset;
    int bus;
    int irq;
    u_char addr[ETH_ALEN];
} last = {0,};

#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
                        lp->tx_old+lp->txRingSize-lp->tx_new-1:\
                        lp->tx_old -lp->tx_new-1)

#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)

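/*
** Public driver entry points (net_device_ops callbacks).
*/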
static int de4x5_open(struct net_device *dev);
static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
                                   struct net_device *dev);
static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
static int de4x5_close(struct net_device *dev);
static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
static void set_multicast_list(struct net_device *dev);
static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

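/*
** Private (driver internal) functions.
*/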
static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
static int de4x5_init(struct net_device *dev);
static int de4x5_sw_reset(struct net_device *dev);
static int de4x5_rx(struct net_device *dev);
static int de4x5_tx(struct net_device *dev);
static void de4x5_ast(struct timer_list *t);
static int de4x5_txur(struct net_device *dev);
static int de4x5_rx_ovfc(struct net_device *dev);

static int autoconf_media(struct net_device *dev);
static void create_packet(struct net_device *dev, char *frame, int len);
static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
static int dc21040_autoconf(struct net_device *dev);
static int dc21041_autoconf(struct net_device *dev);
static int dc21140m_autoconf(struct net_device *dev);
static int dc2114x_autoconf(struct net_device *dev);
static int srom_autoconf(struct net_device *dev);
static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
static int test_for_100Mb(struct net_device *dev, int msec);
static int wait_for_link(struct net_device *dev);
static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
static int is_spd_100(struct net_device *dev);
static int is_100_up(struct net_device *dev);
static int is_10_up(struct net_device *dev);
static int is_anc_capable(struct net_device *dev);
static int ping_media(struct net_device *dev, int msec);
static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
static void de4x5_free_rx_buffs(struct net_device *dev);
static void de4x5_free_tx_buffs(struct net_device *dev);
static void de4x5_save_skbs(struct net_device *dev);
static void de4x5_rst_desc_ring(struct net_device *dev);
static void de4x5_cache_state(struct net_device *dev, int flag);
static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
static struct sk_buff *de4x5_get_cache(struct net_device *dev);
static void de4x5_setup_intr(struct net_device *dev);
static void de4x5_init_connection(struct net_device *dev);
static int de4x5_reset_phy(struct net_device *dev);
static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
static int PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
static short srom_rd(u_long address, u_char offset);
static void srom_latch(u_int command, u_long address);
static void srom_command(u_int command, u_long address);
static void srom_address(u_int command, u_long address, u_char offset);
static short srom_data(u_int command, u_long address);

static void sendto_srom(u_int command, u_long addr);
static int getfrom_srom(u_long addr);
static int srom_map_media(struct net_device *dev);
static int srom_infoleaf_info(struct net_device *dev);
static void srom_init(struct net_device *dev);
static void srom_exec(struct net_device *dev, u_char *p);
static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
static int mii_rdata(u_long ioaddr);
static void mii_wdata(int data, int len, u_long ioaddr);
static void mii_ta(u_long rw, u_long ioaddr);
static int mii_swap(int data, int len);
static void mii_address(u_char addr, u_long ioaddr);
static void sendto_mii(u32 command, int data, u_long ioaddr);
static int getfrom_mii(u32 command, u_long ioaddr);
static int mii_get_oui(u_char phyaddr, u_long ioaddr);
static int mii_get_phy(struct net_device *dev);
static void SetMulticastFilter(struct net_device *dev);
static int get_hw_addr(struct net_device *dev);
static void srom_repair(struct net_device *dev, int card);
static int test_bad_enet(struct net_device *dev, int status);
static int an_exception(struct de4x5_private *lp);
static char *build_setup_frame(struct net_device *dev, int mode);
static void disable_ast(struct net_device *dev);
static long de4x5_switch_mac_port(struct net_device *dev);
static int gep_rd(struct net_device *dev);
static void gep_wr(s32 data, struct net_device *dev);
static void yawn(struct net_device *dev, int state);
static void de4x5_parse_params(struct net_device *dev);
static void de4x5_dbg_open(struct net_device *dev);
static void de4x5_dbg_mii(struct net_device *dev, int k);
static void de4x5_dbg_media(struct net_device *dev);
static void de4x5_dbg_srom(struct de4x5_srom *p);
static void de4x5_dbg_rx(struct sk_buff *skb, int len);
static int dc21041_infoleaf(struct net_device *dev);
static int dc21140_infoleaf(struct net_device *dev);
static int dc21142_infoleaf(struct net_device *dev);
static int dc21143_infoleaf(struct net_device *dev);
static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);

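/*
** Module parameters: 'io' selects one PCI adapter, encoded as 0xbbdd (bus
** number in the high byte, device number in the low byte); 'args' carries
** the full duplex and autosense media settings.
*/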
static int io = 0x0;

module_param_hw(io, int, ioport, 0);
module_param(de4x5_debug, int, 0);
module_param(dec_only, int, 0);
module_param(args, charp, 0);

MODULE_PARM_DESC(io, "de4x5 I/O base address");
MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
MODULE_LICENSE("GPL");

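/*
** SROM infoleaf dispatch table (indexed by chipset) and the info block
** handlers (indexed by block type).
*/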
struct InfoLeaf {
    int chipset;
    int (*fn)(struct net_device *);
};
static struct InfoLeaf infoleaf_array[] = {
    {DC21041, dc21041_infoleaf},
    {DC21140, dc21140_infoleaf},
    {DC21142, dc21142_infoleaf},
    {DC21143, dc21143_infoleaf}
};
#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)

static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
    type0_infoblock,
    type1_infoblock,
    type2_infoblock,
    type3_infoblock,
    type4_infoblock,
    type5_infoblock,
    compact_infoblock
};

#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)

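/*
** Software reset of the chip via CSR0 and hard reset of the PHY through
** the general purpose register; both expect 'iobase' in scope.
*/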
#define RESET_DE4X5 {\
    int i;\
    i=inl(DE4X5_BMR);\
    mdelay(1);\
    outl(i | BMR_SWR, DE4X5_BMR);\
    mdelay(1);\
    outl(i, DE4X5_BMR);\
    mdelay(1);\
    for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
    mdelay(1);\
}

#define PHY_HARD_RESET {\
    outl(GEP_HRST, DE4X5_GEP); \
    mdelay(1); \
    outl(0x00, DE4X5_GEP);\
    mdelay(2); \
}

static const struct net_device_ops de4x5_netdev_ops = {
    .ndo_open            = de4x5_open,
    .ndo_stop            = de4x5_close,
    .ndo_start_xmit      = de4x5_queue_pkt,
    .ndo_get_stats       = de4x5_get_stats,
    .ndo_set_rx_mode     = set_multicast_list,
    .ndo_do_ioctl        = de4x5_ioctl,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr   = eth_validate_addr,
};

1093static int
1094de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1095{
1096 char name[DE4X5_NAME_LENGTH + 1];
1097 struct de4x5_private *lp = netdev_priv(dev);
1098 struct pci_dev *pdev = NULL;
1099 int i, status=0;
1100
1101 dev_set_drvdata(gendev, dev);
1102
1103
1104 if (lp->bus == EISA) {
1105 outb(WAKEUP, PCI_CFPM);
1106 } else {
1107 pdev = to_pci_dev (gendev);
1108 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1109 }
1110 mdelay(10);
1111
1112 RESET_DE4X5;
1113
1114 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1115 return -ENXIO;
1116 }
1117
1118
1119
1120
1121 lp->useSROM = false;
1122 if (lp->bus == PCI) {
1123 PCI_signature(name, lp);
1124 } else {
1125 EISA_signature(name, gendev);
1126 }
1127
1128 if (*name == '\0') {
1129 return -ENXIO;
1130 }
1131
1132 dev->base_addr = iobase;
1133 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1134
1135 status = get_hw_addr(dev);
1136 printk(", h/w address %pM\n", dev->dev_addr);
1137
1138 if (status != 0) {
1139 printk(" which has an Ethernet PROM CRC error.\n");
1140 return -ENXIO;
1141 } else {
1142 skb_queue_head_init(&lp->cache.queue);
1143 lp->cache.gepc = GEP_INIT;
1144 lp->asBit = GEP_SLNK;
1145 lp->asPolarity = GEP_SLNK;
1146 lp->asBitValid = ~0;
1147 lp->timeout = -1;
1148 lp->gendev = gendev;
1149 spin_lock_init(&lp->lock);
1150 timer_setup(&lp->timer, de4x5_ast, 0);
1151 de4x5_parse_params(dev);
1152
1153
1154
1155
1156 lp->autosense = lp->params.autosense;
1157 if (lp->chipset != DC21140) {
1158 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1159 lp->params.autosense = TP;
1160 }
1161 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1162 lp->params.autosense = BNC;
1163 }
1164 }
1165 lp->fdx = lp->params.fdx;
1166 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1167
1168 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1169#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1170 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1171#endif
1172 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1173 &lp->dma_rings, GFP_ATOMIC);
1174 if (lp->rx_ring == NULL) {
1175 return -ENOMEM;
1176 }
1177
1178 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1179
1180
1181
1182
1183
1184#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1185 for (i=0; i<NUM_RX_DESC; i++) {
1186 lp->rx_ring[i].status = 0;
1187 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1188 lp->rx_ring[i].buf = 0;
1189 lp->rx_ring[i].next = 0;
1190 lp->rx_skb[i] = (struct sk_buff *) 1;
1191 }
1192
1193#else
1194 {
1195 dma_addr_t dma_rx_bufs;
1196
1197 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1198 * sizeof(struct de4x5_desc);
1199 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1200 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1201 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1202 for (i=0; i<NUM_RX_DESC; i++) {
1203 lp->rx_ring[i].status = 0;
1204 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1205 lp->rx_ring[i].buf =
1206 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1207 lp->rx_ring[i].next = 0;
1208 lp->rx_skb[i] = (struct sk_buff *) 1;
1209 }
1210
1211 }
1212#endif
1213
1214 barrier();
1215
1216 lp->rxRingSize = NUM_RX_DESC;
1217 lp->txRingSize = NUM_TX_DESC;
1218
1219
1220 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1221 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1222
1223
1224 outl(lp->dma_rings, DE4X5_RRBA);
1225 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1226 DE4X5_TRBA);
1227
1228
1229 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1230 lp->irq_en = IMR_NIM | IMR_AIM;
1231
1232
1233 create_packet(dev, lp->frame, sizeof(lp->frame));
1234
1235
1236 i = lp->cfrv & 0x000000fe;
1237 if ((lp->chipset == DC21140) && (i == 0x20)) {
1238 lp->rx_ovf = 1;
1239 }
1240
1241
1242 if (lp->useSROM) {
1243 lp->state = INITIALISED;
1244 if (srom_infoleaf_info(dev)) {
1245 dma_free_coherent (gendev, lp->dma_size,
1246 lp->rx_ring, lp->dma_rings);
1247 return -ENXIO;
1248 }
1249 srom_init(dev);
1250 }
1251
1252 lp->state = CLOSED;
1253
1254
1255
1256
1257 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1258 mii_get_phy(dev);
1259 }
1260
1261 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1262 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1263 }
1264
1265 if (de4x5_debug & DEBUG_VERSION) {
1266 printk(version);
1267 }
1268
1269
1270 SET_NETDEV_DEV(dev, gendev);
1271 dev->netdev_ops = &de4x5_netdev_ops;
1272 dev->mem_start = 0;
1273
1274
1275 if ((status = register_netdev (dev))) {
1276 dma_free_coherent (gendev, lp->dma_size,
1277 lp->rx_ring, lp->dma_rings);
1278 return status;
1279 }
1280
1281
1282 yawn(dev, SLEEP);
1283
1284 return status;
1285}
1286
1287
1288static int
1289de4x5_open(struct net_device *dev)
1290{
1291 struct de4x5_private *lp = netdev_priv(dev);
1292 u_long iobase = dev->base_addr;
1293 int i, status = 0;
1294 s32 omr;
1295
1296
1297 for (i=0; i<lp->rxRingSize; i++) {
1298 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1299 de4x5_free_rx_buffs(dev);
1300 return -EAGAIN;
1301 }
1302 }
1303
1304
1305
1306
1307 yawn(dev, WAKEUP);
1308
1309
1310
1311
1312 status = de4x5_init(dev);
1313 spin_lock_init(&lp->lock);
1314 lp->state = OPEN;
1315 de4x5_dbg_open(dev);
1316
1317 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1318 lp->adapter_name, dev)) {
1319 printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
1320 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1321 lp->adapter_name, dev)) {
1322 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1323 disable_ast(dev);
1324 de4x5_free_rx_buffs(dev);
1325 de4x5_free_tx_buffs(dev);
1326 yawn(dev, SLEEP);
1327 lp->state = CLOSED;
1328 return -EAGAIN;
1329 } else {
1330 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1331 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1332 }
1333 }
1334
1335 lp->interrupt = UNMASK_INTERRUPTS;
1336 netif_trans_update(dev);
1337
1338 START_DE4X5;
1339
1340 de4x5_setup_intr(dev);
1341
1342 if (de4x5_debug & DEBUG_OPEN) {
1343 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1344 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1345 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1346 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1347 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1348 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1349 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1350 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1351 }
1352
1353 return status;
1354}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364static int
1365de4x5_init(struct net_device *dev)
1366{
1367
1368 netif_stop_queue(dev);
1369
1370 de4x5_sw_reset(dev);
1371
1372
1373 autoconf_media(dev);
1374
1375 return 0;
1376}
1377
1378static int
1379de4x5_sw_reset(struct net_device *dev)
1380{
1381 struct de4x5_private *lp = netdev_priv(dev);
1382 u_long iobase = dev->base_addr;
1383 int i, j, status = 0;
1384 s32 bmr, omr;
1385
1386
1387 if (!lp->useSROM) {
1388 if (lp->phy[lp->active].id != 0) {
1389 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1390 } else {
1391 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1392 }
1393 de4x5_switch_mac_port(dev);
1394 }
1395
1396
1397
1398
1399
1400
1401 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1402 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1403 outl(bmr, DE4X5_BMR);
1404
1405 omr = inl(DE4X5_OMR) & ~OMR_PR;
1406 if (lp->chipset == DC21140) {
1407 omr |= (OMR_SDP | OMR_SB);
1408 }
1409 lp->setup_f = PERFECT;
1410 outl(lp->dma_rings, DE4X5_RRBA);
1411 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1412 DE4X5_TRBA);
1413
1414 lp->rx_new = lp->rx_old = 0;
1415 lp->tx_new = lp->tx_old = 0;
1416
1417 for (i = 0; i < lp->rxRingSize; i++) {
1418 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1419 }
1420
1421 for (i = 0; i < lp->txRingSize; i++) {
1422 lp->tx_ring[i].status = cpu_to_le32(0);
1423 }
1424
1425 barrier();
1426
1427
1428 SetMulticastFilter(dev);
1429
1430 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1431 outl(omr|OMR_ST, DE4X5_OMR);
1432
1433
1434
1435 for (j=0, i=0;(i<500) && (j==0);i++) {
1436 mdelay(1);
1437 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1438 }
1439 outl(omr, DE4X5_OMR);
1440
1441 if (j == 0) {
1442 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1443 inl(DE4X5_STS));
1444 status = -EIO;
1445 }
1446
1447 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1448 lp->tx_old = lp->tx_new;
1449
1450 return status;
1451}
1452
1453
1454
1455
1456static netdev_tx_t
1457de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1458{
1459 struct de4x5_private *lp = netdev_priv(dev);
1460 u_long iobase = dev->base_addr;
1461 u_long flags = 0;
1462
1463 netif_stop_queue(dev);
1464 if (!lp->tx_enable)
1465 goto tx_err;
1466
1467
1468
1469
1470
1471
1472 spin_lock_irqsave(&lp->lock, flags);
1473 de4x5_tx(dev);
1474 spin_unlock_irqrestore(&lp->lock, flags);
1475
1476
1477 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1478 goto tx_err;
1479
1480
1481 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1482 if (lp->interrupt) {
1483 de4x5_putb_cache(dev, skb);
1484 } else {
1485 de4x5_put_cache(dev, skb);
1486 }
1487 if (de4x5_debug & DEBUG_TX) {
1488 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1489 }
1490 } else if (skb->len > 0) {
1491
1492 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1493 de4x5_put_cache(dev, skb);
1494 skb = de4x5_get_cache(dev);
1495 }
1496
1497 while (skb && !netif_queue_stopped(dev) &&
1498 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1499 spin_lock_irqsave(&lp->lock, flags);
1500 netif_stop_queue(dev);
1501 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1502 lp->stats.tx_bytes += skb->len;
1503 outl(POLL_DEMAND, DE4X5_TPD);
1504
1505 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1506
1507 if (TX_BUFFS_AVAIL) {
1508 netif_start_queue(dev);
1509 }
1510 skb = de4x5_get_cache(dev);
1511 spin_unlock_irqrestore(&lp->lock, flags);
1512 }
1513 if (skb) de4x5_putb_cache(dev, skb);
1514 }
1515
1516 lp->cache.lock = 0;
1517
1518 return NETDEV_TX_OK;
1519tx_err:
1520 dev_kfree_skb_any(skb);
1521 return NETDEV_TX_OK;
1522}
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535static irqreturn_t
1536de4x5_interrupt(int irq, void *dev_id)
1537{
1538 struct net_device *dev = dev_id;
1539 struct de4x5_private *lp;
1540 s32 imr, omr, sts, limit;
1541 u_long iobase;
1542 unsigned int handled = 0;
1543
1544 lp = netdev_priv(dev);
1545 spin_lock(&lp->lock);
1546 iobase = dev->base_addr;
1547
1548 DISABLE_IRQs;
1549
1550 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1551 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1552
1553 synchronize_irq(dev->irq);
1554
1555 for (limit=0; limit<8; limit++) {
1556 sts = inl(DE4X5_STS);
1557 outl(sts, DE4X5_STS);
1558
1559 if (!(sts & lp->irq_mask)) break;
1560 handled = 1;
1561
1562 if (sts & (STS_RI | STS_RU))
1563 de4x5_rx(dev);
1564
1565 if (sts & (STS_TI | STS_TU))
1566 de4x5_tx(dev);
1567
1568 if (sts & STS_LNF) {
1569 lp->irq_mask &= ~IMR_LFM;
1570 }
1571
1572 if (sts & STS_UNF) {
1573 de4x5_txur(dev);
1574 }
1575
1576 if (sts & STS_SE) {
1577 STOP_DE4X5;
1578 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1579 dev->name, sts);
1580 spin_unlock(&lp->lock);
1581 return IRQ_HANDLED;
1582 }
1583 }
1584
1585
1586 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1587 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1588 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1589 }
1590 lp->cache.lock = 0;
1591 }
1592
1593 lp->interrupt = UNMASK_INTERRUPTS;
1594 ENABLE_IRQs;
1595 spin_unlock(&lp->lock);
1596
1597 return IRQ_RETVAL(handled);
1598}
1599
1600static int
1601de4x5_rx(struct net_device *dev)
1602{
1603 struct de4x5_private *lp = netdev_priv(dev);
1604 u_long iobase = dev->base_addr;
1605 int entry;
1606 s32 status;
1607
1608 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1609 entry=lp->rx_new) {
1610 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1611
1612 if (lp->rx_ovf) {
1613 if (inl(DE4X5_MFC) & MFC_FOCM) {
1614 de4x5_rx_ovfc(dev);
1615 break;
1616 }
1617 }
1618
1619 if (status & RD_FS) {
1620 lp->rx_old = entry;
1621 }
1622
1623 if (status & RD_LS) {
1624 if (lp->tx_enable) lp->linkOK++;
1625 if (status & RD_ES) {
1626 lp->stats.rx_errors++;
1627 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1628 if (status & RD_CE) lp->stats.rx_crc_errors++;
1629 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1630 if (status & RD_TL) lp->stats.rx_length_errors++;
1631 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1632 if (status & RD_CS) lp->pktStats.rx_collision++;
1633 if (status & RD_DB) lp->pktStats.rx_dribble++;
1634 if (status & RD_OF) lp->pktStats.rx_overflow++;
1635 } else {
1636 struct sk_buff *skb;
1637 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1638 >> 16) - 4;
1639
1640 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1641 printk("%s: Insufficient memory; nuking packet.\n",
1642 dev->name);
1643 lp->stats.rx_dropped++;
1644 } else {
1645 de4x5_dbg_rx(skb, pkt_len);
1646
1647
1648 skb->protocol=eth_type_trans(skb,dev);
1649 de4x5_local_stats(dev, skb->data, pkt_len);
1650 netif_rx(skb);
1651
1652
1653 lp->stats.rx_packets++;
1654 lp->stats.rx_bytes += pkt_len;
1655 }
1656 }
1657
1658
1659 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1660 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1661 barrier();
1662 }
1663 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1664 barrier();
1665 }
1666
1667
1668
1669
1670 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1671 }
1672
1673 return 0;
1674}
1675
1676static inline void
1677de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1678{
1679 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1680 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1681 DMA_TO_DEVICE);
1682 if ((u_long) lp->tx_skb[entry] > 1)
1683 dev_kfree_skb_irq(lp->tx_skb[entry]);
1684 lp->tx_skb[entry] = NULL;
1685}
1686
1687
1688
1689
1690static int
1691de4x5_tx(struct net_device *dev)
1692{
1693 struct de4x5_private *lp = netdev_priv(dev);
1694 u_long iobase = dev->base_addr;
1695 int entry;
1696 s32 status;
1697
1698 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1699 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1700 if (status < 0) {
1701 break;
1702 } else if (status != 0x7fffffff) {
1703 if (status & TD_ES) {
1704 lp->stats.tx_errors++;
1705 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1706 if (status & TD_LC) lp->stats.tx_window_errors++;
1707 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1708 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1709 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1710
1711 if (TX_PKT_PENDING) {
1712 outl(POLL_DEMAND, DE4X5_TPD);
1713 }
1714 } else {
1715 lp->stats.tx_packets++;
1716 if (lp->tx_enable) lp->linkOK++;
1717 }
1718
1719 lp->stats.collisions += ((status & TD_EC) ? 16 :
1720 ((status & TD_CC) >> 3));
1721
1722
1723 if (lp->tx_skb[entry] != NULL)
1724 de4x5_free_tx_buff(lp, entry);
1725 }
1726
1727
1728 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1729 }
1730
1731
1732 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1733 if (lp->interrupt)
1734 netif_wake_queue(dev);
1735 else
1736 netif_start_queue(dev);
1737 }
1738
1739 return 0;
1740}
1741
1742static void
1743de4x5_ast(struct timer_list *t)
1744{
1745 struct de4x5_private *lp = from_timer(lp, t, timer);
1746 struct net_device *dev = dev_get_drvdata(lp->gendev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766}
1767
1768static int
1769de4x5_txur(struct net_device *dev)
1770{
1771 struct de4x5_private *lp = netdev_priv(dev);
1772 u_long iobase = dev->base_addr;
1773 int omr;
1774
1775 omr = inl(DE4X5_OMR);
1776 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1777 omr &= ~(OMR_ST|OMR_SR);
1778 outl(omr, DE4X5_OMR);
1779 while (inl(DE4X5_STS) & STS_TS);
1780 if ((omr & OMR_TR) < OMR_TR) {
1781 omr += 0x4000;
1782 } else {
1783 omr |= OMR_SF;
1784 }
1785 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1786 }
1787
1788 return 0;
1789}
1790
1791static int
1792de4x5_rx_ovfc(struct net_device *dev)
1793{
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810}
1811
1812static int
1813de4x5_close(struct net_device *dev)
1814{
1815 struct de4x5_private *lp = netdev_priv(dev);
1816 u_long iobase = dev->base_addr;
1817 s32 imr, omr;
1818
1819 disable_ast(dev);
1820
1821 netif_stop_queue(dev);
1822
1823 if (de4x5_debug & DEBUG_CLOSE) {
1824 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1825 dev->name, inl(DE4X5_STS));
1826 }
1827
1828
1829
1830
1831 DISABLE_IRQs;
1832 STOP_DE4X5;
1833
1834
1835 free_irq(dev->irq, dev);
1836 lp->state = CLOSED;
1837
1838
1839 de4x5_free_rx_buffs(dev);
1840 de4x5_free_tx_buffs(dev);
1841
1842
1843 yawn(dev, SLEEP);
1844
1845 return 0;
1846}
1847
1848static struct net_device_stats *
1849de4x5_get_stats(struct net_device *dev)
1850{
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857}
1858
1859static void
1860de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861{
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++;
1882 if (lp->pktStats.bins[0] == 0) {
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895static void
1896load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1897{
1898 struct de4x5_private *lp = netdev_priv(dev);
1899 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1900 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1901
1902 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1903 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1904 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1905 lp->tx_skb[lp->tx_new] = skb;
1906 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1907 barrier();
1908
1909 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1910 barrier();
1911}
1912
1913
1914
1915
1916static void
1917set_multicast_list(struct net_device *dev)
1918{
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) {
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD);
1936 netif_trans_update(dev);
1937 }
1938 }
1939}
1940
1941
1942
1943
1944
1945
1946static void
1947SetMulticastFilter(struct net_device *dev)
1948{
1949 struct de4x5_private *lp = netdev_priv(dev);
1950 struct netdev_hw_addr *ha;
1951 u_long iobase = dev->base_addr;
1952 int i, bit, byte;
1953 u16 hashcode;
1954 u32 omr, crc;
1955 char *pa;
1956 unsigned char *addrs;
1957
1958 omr = inl(DE4X5_OMR);
1959 omr &= ~(OMR_PR | OMR_PM);
1960 pa = build_setup_frame(dev, ALL);
1961
1962 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1963 omr |= OMR_PM;
1964 } else if (lp->setup_f == HASH_PERF) {
1965 netdev_for_each_mc_addr(ha, dev) {
1966 crc = ether_crc_le(ETH_ALEN, ha->addr);
1967 hashcode = crc & DE4X5_HASH_BITS;
1968
1969 byte = hashcode >> 3;
1970 bit = 1 << (hashcode & 0x07);
1971
1972 byte <<= 1;
1973 if (byte & 0x02) {
1974 byte -= 1;
1975 }
1976 lp->setup_frame[byte] |= bit;
1977 }
1978 } else {
1979 netdev_for_each_mc_addr(ha, dev) {
1980 addrs = ha->addr;
1981 for (i=0; i<ETH_ALEN; i++) {
1982 *(pa + (i&1)) = *addrs++;
1983 if (i & 0x01) pa += 4;
1984 }
1985 }
1986 }
1987 outl(omr, DE4X5_OMR);
1988}
1989
1990#ifdef CONFIG_EISA
1991
1992static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
1994static int de4x5_eisa_probe(struct device *gendev)
1995{
1996 struct eisa_device *edev;
1997 u_long iobase;
1998 u_char irq, regval;
1999 u_short vendor;
2000 u32 cfid;
2001 int status, device;
2002 struct net_device *dev;
2003 struct de4x5_private *lp;
2004
2005 edev = to_eisa_device (gendev);
2006 iobase = edev->base_addr;
2007
2008 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2009 return -EBUSY;
2010
2011 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2012 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2013 status = -EBUSY;
2014 goto release_reg_1;
2015 }
2016
2017 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2018 status = -ENOMEM;
2019 goto release_reg_2;
2020 }
2021 lp = netdev_priv(dev);
2022
2023 cfid = (u32) inl(PCI_CFID);
2024 lp->cfrv = (u_short) inl(PCI_CFRV);
2025 device = (cfid >> 8) & 0x00ffff00;
2026 vendor = (u_short) cfid;
2027
2028
2029 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2030#ifdef CONFIG_ALPHA
2031
2032
2033
2034
2035
2036
2037 outb (ER1_IAM | 1, EISA_REG1);
2038 mdelay (1);
2039
2040
2041 outb (ER1_IAM, EISA_REG1);
2042 mdelay (1);
2043
2044
2045 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2046
2047
2048 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2049#endif
2050 irq = de4x5_irq[(regval >> 1) & 0x03];
2051
2052 if (is_DC2114x) {
2053 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2054 }
2055 lp->chipset = device;
2056 lp->bus = EISA;
2057
2058
2059 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2060 outl(0x00006000, PCI_CFLT);
2061 outl(iobase, PCI_CBIO);
2062
2063 DevicePresent(dev, EISA_APROM);
2064
2065 dev->irq = irq;
2066
2067 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2068 return 0;
2069 }
2070
2071 free_netdev (dev);
2072 release_reg_2:
2073 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2074 release_reg_1:
2075 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2076
2077 return status;
2078}
2079
2080static int de4x5_eisa_remove(struct device *device)
2081{
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094}
2095
2096static const struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 },
2098 { "" }
2099};
2100MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = de4x5_eisa_remove,
2108 }
2109};
2111#endif
2112
2113#ifdef CONFIG_PCI
2114
2115
2116
2117
2118
2119
2120
2121static void
2122srom_search(struct net_device *dev, struct pci_dev *pdev)
2123{
2124 u_char pb;
2125 u_short vendor, status;
2126 u_int irq = 0, device;
2127 u_long iobase = 0;
2128 int i, j;
2129 struct de4x5_private *lp = netdev_priv(dev);
2130 struct pci_dev *this_dev;
2131
2132 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2133 vendor = this_dev->vendor;
2134 device = this_dev->device << 8;
2135 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2136
2137
2138 pb = this_dev->bus->number;
2139
2140
2141 lp->device = PCI_SLOT(this_dev->devfn);
2142 lp->bus_num = pb;
2143
2144
2145 if (is_DC2114x) {
2146 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2147 ? DC21142 : DC21143);
2148 }
2149 lp->chipset = device;
2150
2151
2152 iobase = pci_resource_start(this_dev, 0);
2153
2154
2155 irq = this_dev->irq;
2156 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2157
2158
2159 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2160 if (!(status & PCI_COMMAND_IO)) continue;
2161
2162
2163 DevicePresent(dev, DE4X5_APROM);
2164 for (j=0, i=0; i<ETH_ALEN; i++) {
2165 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2166 }
2167 if (j != 0 && j != 6 * 0xff) {
2168 last.chipset = device;
2169 last.bus = pb;
2170 last.irq = irq;
2171 for (i=0; i<ETH_ALEN; i++) {
2172 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2173 }
2174 return;
2175 }
2176 }
2177}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195static int de4x5_pci_probe(struct pci_dev *pdev,
2196 const struct pci_device_id *ent)
2197{
2198 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2199 u_short vendor, status;
2200 u_int irq = 0, device;
2201 u_long iobase = 0;
2202 int error;
2203 struct net_device *dev;
2204 struct de4x5_private *lp;
2205
2206 dev_num = PCI_SLOT(pdev->devfn);
2207 pb = pdev->bus->number;
2208
2209 if (io) {
2210 pbus = (u_short)(io >> 8);
2211 dnum = (u_short)(io & 0xff);
2212 if ((pbus != pb) || (dnum != dev_num))
2213 return -ENODEV;
2214 }
2215
2216 vendor = pdev->vendor;
2217 device = pdev->device << 8;
2218 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2219 return -ENODEV;
2220
2221
2222 if ((error = pci_enable_device (pdev)))
2223 return error;
2224
2225 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2226 error = -ENOMEM;
2227 goto disable_dev;
2228 }
2229
2230 lp = netdev_priv(dev);
2231 lp->bus = PCI;
2232 lp->bus_num = 0;
2233
2234
2235 if (lp->bus_num != pb) {
2236 lp->bus_num = pb;
2237 srom_search(dev, pdev);
2238 }
2239
2240
2241 lp->cfrv = pdev->revision;
2242
2243
2244 lp->device = dev_num;
2245 lp->bus_num = pb;
2246
2247
2248 if (is_DC2114x) {
2249 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2250 }
2251 lp->chipset = device;
2252
2253
2254 iobase = pci_resource_start(pdev, 0);
2255
2256
2257 irq = pdev->irq;
2258 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2259 error = -ENODEV;
2260 goto free_dev;
2261 }
2262
2263
2264 pci_read_config_word(pdev, PCI_COMMAND, &status);
2265#ifdef __powerpc__
2266 if (!(status & PCI_COMMAND_IO)) {
2267 status |= PCI_COMMAND_IO;
2268 pci_write_config_word(pdev, PCI_COMMAND, status);
2269 pci_read_config_word(pdev, PCI_COMMAND, &status);
2270 }
2271#endif
2272 if (!(status & PCI_COMMAND_IO)) {
2273 error = -ENODEV;
2274 goto free_dev;
2275 }
2276
2277 if (!(status & PCI_COMMAND_MASTER)) {
2278 status |= PCI_COMMAND_MASTER;
2279 pci_write_config_word(pdev, PCI_COMMAND, status);
2280 pci_read_config_word(pdev, PCI_COMMAND, &status);
2281 }
2282 if (!(status & PCI_COMMAND_MASTER)) {
2283 error = -ENODEV;
2284 goto free_dev;
2285 }
2286
2287
2288 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2289 if (timer < 0x60) {
2290 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2291 }
2292
2293 DevicePresent(dev, DE4X5_APROM);
2294
2295 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2296 error = -EBUSY;
2297 goto free_dev;
2298 }
2299
2300 dev->irq = irq;
2301
2302 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2303 goto release;
2304 }
2305
2306 return 0;
2307
2308 release:
2309 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2310 free_dev:
2311 free_netdev (dev);
2312 disable_dev:
2313 pci_disable_device (pdev);
2314 return error;
2315}
2316
2317static void de4x5_pci_remove(struct pci_dev *pdev)
2318{
2319 struct net_device *dev;
2320 u_long iobase;
2321
2322 dev = pci_get_drvdata(pdev);
2323 iobase = dev->base_addr;
2324
2325 unregister_netdev (dev);
2326 free_netdev (dev);
2327 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2328 pci_disable_device (pdev);
2329}
2330
2331static const struct pci_device_id de4x5_pci_tbl[] = {
2332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2336 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2337 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2338 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2339 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2340 { },
2341};
2342
2343static struct pci_driver de4x5_pci_driver = {
2344 .name = "de4x5",
2345 .id_table = de4x5_pci_tbl,
2346 .probe = de4x5_pci_probe,
2347 .remove = de4x5_pci_remove,
2348};
2349
2350#endif
2351
2352
2353
2354
2355
2356
2357
2358
2359static int
2360autoconf_media(struct net_device *dev)
2361{
2362 struct de4x5_private *lp = netdev_priv(dev);
2363 u_long iobase = dev->base_addr;
2364
2365 disable_ast(dev);
2366
2367 lp->c_media = AUTO;
2368 inl(DE4X5_MFC);
2369 lp->media = INIT;
2370 lp->tcount = 0;
2371
2372 de4x5_ast(&lp->timer);
2373
2374 return lp->media;
2375}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389static int
2390dc21040_autoconf(struct net_device *dev)
2391{
2392 struct de4x5_private *lp = netdev_priv(dev);
2393 u_long iobase = dev->base_addr;
2394 int next_tick = DE4X5_AUTOSENSE_MS;
2395 s32 imr;
2396
2397 switch (lp->media) {
2398 case INIT:
2399 DISABLE_IRQs;
2400 lp->tx_enable = false;
2401 lp->timeout = -1;
2402 de4x5_save_skbs(dev);
2403 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2404 lp->media = TP;
2405 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2406 lp->media = BNC_AUI;
2407 } else if (lp->autosense == EXT_SIA) {
2408 lp->media = EXT_SIA;
2409 } else {
2410 lp->media = NC;
2411 }
2412 lp->local_state = 0;
2413 next_tick = dc21040_autoconf(dev);
2414 break;
2415
2416 case TP:
2417 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2418 TP_SUSPECT, test_tp);
2419 break;
2420
2421 case TP_SUSPECT:
2422 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2423 break;
2424
2425 case BNC:
2426 case AUI:
2427 case BNC_AUI:
2428 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2429 BNC_AUI_SUSPECT, ping_media);
2430 break;
2431
2432 case BNC_AUI_SUSPECT:
2433 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2434 break;
2435
2436 case EXT_SIA:
2437 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2438 NC, EXT_SIA_SUSPECT, ping_media);
2439 break;
2440
2441 case EXT_SIA_SUSPECT:
2442 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2443 break;
2444
2445 case NC:
2446
2447 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2448 if (lp->media != lp->c_media) {
2449 de4x5_dbg_media(dev);
2450 lp->c_media = lp->media;
2451 }
2452 lp->media = INIT;
2453 lp->tx_enable = false;
2454 break;
2455 }
2456
2457 return next_tick;
2458}
2459
2460static int
2461dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2462 int next_state, int suspect_state,
2463 int (*fn)(struct net_device *, int))
2464{
2465 struct de4x5_private *lp = netdev_priv(dev);
2466 int next_tick = DE4X5_AUTOSENSE_MS;
2467 int linkBad;
2468
2469 switch (lp->local_state) {
2470 case 0:
2471 reset_init_sia(dev, csr13, csr14, csr15);
2472 lp->local_state++;
2473 next_tick = 500;
2474 break;
2475
2476 case 1:
2477 if (!lp->tx_enable) {
2478 linkBad = fn(dev, timeout);
2479 if (linkBad < 0) {
2480 next_tick = linkBad & ~TIMER_CB;
2481 } else {
2482 if (linkBad && (lp->autosense == AUTO)) {
2483 lp->local_state = 0;
2484 lp->media = next_state;
2485 } else {
2486 de4x5_init_connection(dev);
2487 }
2488 }
2489 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2490 lp->media = suspect_state;
2491 next_tick = 3000;
2492 }
2493 break;
2494 }
2495
2496 return next_tick;
2497}
2498
2499static int
2500de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2501 int (*fn)(struct net_device *, int),
2502 int (*asfn)(struct net_device *))
2503{
2504 struct de4x5_private *lp = netdev_priv(dev);
2505 int next_tick = DE4X5_AUTOSENSE_MS;
2506 int linkBad;
2507
2508 switch (lp->local_state) {
2509 case 1:
2510 if (lp->linkOK) {
2511 lp->media = prev_state;
2512 } else {
2513 lp->local_state++;
2514 next_tick = asfn(dev);
2515 }
2516 break;
2517
2518 case 2:
2519 linkBad = fn(dev, timeout);
2520 if (linkBad < 0) {
2521 next_tick = linkBad & ~TIMER_CB;
2522 } else if (!linkBad) {
2523 lp->local_state--;
2524 lp->media = prev_state;
2525 } else {
2526 lp->media = INIT;
2527 lp->tcount++;
2528 }
2529 }
2530
2531 return next_tick;
2532}
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543static int
2544dc21041_autoconf(struct net_device *dev)
2545{
2546 struct de4x5_private *lp = netdev_priv(dev);
2547 u_long iobase = dev->base_addr;
2548 s32 sts, irqs, irq_mask, imr, omr;
2549 int next_tick = DE4X5_AUTOSENSE_MS;
2550
2551 switch (lp->media) {
2552 case INIT:
2553 DISABLE_IRQs;
2554 lp->tx_enable = false;
2555 lp->timeout = -1;
2556 de4x5_save_skbs(dev);
2557 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2558 lp->media = TP;
2559 } else if (lp->autosense == TP) {
2560 lp->media = TP;
2561 } else if (lp->autosense == BNC) {
2562 lp->media = BNC;
2563 } else if (lp->autosense == AUI) {
2564 lp->media = AUI;
2565 } else {
2566 lp->media = NC;
2567 }
2568 lp->local_state = 0;
2569 next_tick = dc21041_autoconf(dev);
2570 break;
2571
2572 case TP_NW:
2573 if (lp->timeout < 0) {
2574 omr = inl(DE4X5_OMR);
2575 outl(omr | OMR_FDX, DE4X5_OMR);
2576 }
2577 irqs = STS_LNF | STS_LNP;
2578 irq_mask = IMR_LFM | IMR_LPM;
2579 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2580 if (sts < 0) {
2581 next_tick = sts & ~TIMER_CB;
2582 } else {
2583 if (sts & STS_LNP) {
2584 lp->media = ANS;
2585 } else {
2586 lp->media = AUI;
2587 }
2588 next_tick = dc21041_autoconf(dev);
2589 }
2590 break;
2591
2592 case ANS:
2593 if (!lp->tx_enable) {
2594 irqs = STS_LNP;
2595 irq_mask = IMR_LPM;
2596 sts = test_ans(dev, irqs, irq_mask, 3000);
2597 if (sts < 0) {
2598 next_tick = sts & ~TIMER_CB;
2599 } else {
2600 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2601 lp->media = TP;
2602 next_tick = dc21041_autoconf(dev);
2603 } else {
2604 lp->local_state = 1;
2605 de4x5_init_connection(dev);
2606 }
2607 }
2608 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2609 lp->media = ANS_SUSPECT;
2610 next_tick = 3000;
2611 }
2612 break;
2613
2614 case ANS_SUSPECT:
2615 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2616 break;
2617
2618 case TP:
2619 if (!lp->tx_enable) {
2620 if (lp->timeout < 0) {
2621 omr = inl(DE4X5_OMR);
2622 outl(omr & ~OMR_FDX, DE4X5_OMR);
2623 }
2624 irqs = STS_LNF | STS_LNP;
2625 irq_mask = IMR_LFM | IMR_LPM;
2626 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2627 if (sts < 0) {
2628 next_tick = sts & ~TIMER_CB;
2629 } else {
2630 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2631 if (inl(DE4X5_SISR) & SISR_NRA) {
2632 lp->media = AUI;
2633 } else {
2634 lp->media = BNC;
2635 }
2636 next_tick = dc21041_autoconf(dev);
2637 } else {
2638 lp->local_state = 1;
2639 de4x5_init_connection(dev);
2640 }
2641 }
2642 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2643 lp->media = TP_SUSPECT;
2644 next_tick = 3000;
2645 }
2646 break;
2647
2648 case TP_SUSPECT:
2649 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2650 break;
2651
2652 case AUI:
2653 if (!lp->tx_enable) {
2654 if (lp->timeout < 0) {
2655 omr = inl(DE4X5_OMR);
2656 outl(omr & ~OMR_FDX, DE4X5_OMR);
2657 }
2658 irqs = 0;
2659 irq_mask = 0;
2660 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2661 if (sts < 0) {
2662 next_tick = sts & ~TIMER_CB;
2663 } else {
2664 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2665 lp->media = BNC;
2666 next_tick = dc21041_autoconf(dev);
2667 } else {
2668 lp->local_state = 1;
2669 de4x5_init_connection(dev);
2670 }
2671 }
2672 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2673 lp->media = AUI_SUSPECT;
2674 next_tick = 3000;
2675 }
2676 break;
2677
2678 case AUI_SUSPECT:
2679 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2680 break;
2681
2682 case BNC:
2683 switch (lp->local_state) {
2684 case 0:
2685 if (lp->timeout < 0) {
2686 omr = inl(DE4X5_OMR);
2687 outl(omr & ~OMR_FDX, DE4X5_OMR);
2688 }
2689 irqs = 0;
2690 irq_mask = 0;
2691 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2692 if (sts < 0) {
2693 next_tick = sts & ~TIMER_CB;
2694 } else {
2695 lp->local_state++;
2696 next_tick = dc21041_autoconf(dev);
2697 }
2698 break;
2699
2700 case 1:
2701 if (!lp->tx_enable) {
2702 if ((sts = ping_media(dev, 3000)) < 0) {
2703 next_tick = sts & ~TIMER_CB;
2704 } else {
2705 if (sts) {
2706 lp->local_state = 0;
2707 lp->media = NC;
2708 } else {
2709 de4x5_init_connection(dev);
2710 }
2711 }
2712 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2713 lp->media = BNC_SUSPECT;
2714 next_tick = 3000;
2715 }
2716 break;
2717 }
2718 break;
2719
2720 case BNC_SUSPECT:
2721 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2722 break;
2723
2724 case NC:
2725 omr = inl(DE4X5_OMR);
2726 outl(omr | OMR_FDX, DE4X5_OMR);
2727 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2728 if (lp->media != lp->c_media) {
2729 de4x5_dbg_media(dev);
2730 lp->c_media = lp->media;
2731 }
2732 lp->media = INIT;
2733 lp->tx_enable = false;
2734 break;
2735 }
2736
2737 return next_tick;
2738}
2739
2740
2741
2742
2743
2744
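/*
** dc21140m_autoconf() - media autosense state machine for the DC21140(A).
** In outline: INIT resets the PHY and picks a starting medium from the
** SROM info block or the autosense setting; ANS runs MII autonegotiation
** and decodes the link partner abilities; SPD_DET samples for a 100Mb/s
** link; _100Mb and _10Mb bring the connection up and then just monitor it;
** NC logs the lack of a connection and drops back to INIT. The return
** value is the next timer interval in ms, with TIMER_CB flagging a
** continuation of the test currently in progress.
*/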
2745static int
2746dc21140m_autoconf(struct net_device *dev)
2747{
2748 struct de4x5_private *lp = netdev_priv(dev);
2749 int ana, anlpa, cap, cr, slnk, sr;
2750 int next_tick = DE4X5_AUTOSENSE_MS;
2751 u_long imr, omr, iobase = dev->base_addr;
2752
2753 switch(lp->media) {
2754 case INIT:
2755 if (lp->timeout < 0) {
2756 DISABLE_IRQs;
2757 lp->tx_enable = false;
2758 lp->linkOK = 0;
2759 de4x5_save_skbs(dev);
2760 }
2761 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2762 next_tick &= ~TIMER_CB;
2763 } else {
2764 if (lp->useSROM) {
2765 if (srom_map_media(dev) < 0) {
2766 lp->tcount++;
2767 return next_tick;
2768 }
2769 srom_exec(dev, lp->phy[lp->active].gep);
2770 if (lp->infoblock_media == ANS) {
2771 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2772 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2773 }
2774 } else {
2775 lp->tmp = MII_SR_ASSC;
2776 SET_10Mb;
2777 if (lp->autosense == _100Mb) {
2778 lp->media = _100Mb;
2779 } else if (lp->autosense == _10Mb) {
2780 lp->media = _10Mb;
2781 } else if ((lp->autosense == AUTO) &&
2782 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2783 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2784 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2785 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2786 lp->media = ANS;
2787 } else if (lp->autosense == AUTO) {
2788 lp->media = SPD_DET;
2789 } else if (is_spd_100(dev) && is_100_up(dev)) {
2790 lp->media = _100Mb;
2791 } else {
2792 lp->media = NC;
2793 }
2794 }
2795 lp->local_state = 0;
2796 next_tick = dc21140m_autoconf(dev);
2797 }
2798 break;
2799
2800 case ANS:
2801 switch (lp->local_state) {
2802 case 0:
2803 if (lp->timeout < 0) {
2804 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2805 }
2806 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2807 if (cr < 0) {
2808 next_tick = cr & ~TIMER_CB;
2809 } else {
2810 if (cr) {
2811 lp->local_state = 0;
2812 lp->media = SPD_DET;
2813 } else {
2814 lp->local_state++;
2815 }
2816 next_tick = dc21140m_autoconf(dev);
2817 }
2818 break;
2819
2820 case 1:
2821 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2822 next_tick = sr & ~TIMER_CB;
2823 } else {
2824 lp->media = SPD_DET;
2825 lp->local_state = 0;
2826 if (sr) {
2827 lp->tmp = MII_SR_ASSC;
2828 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2829 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2830 if (!(anlpa & MII_ANLPA_RF) &&
2831 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2832 if (cap & MII_ANA_100M) {
2833 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2834 lp->media = _100Mb;
2835 } else if (cap & MII_ANA_10M) {
2836 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2837
2838 lp->media = _10Mb;
2839 }
2840 }
2841 }
2842 next_tick = dc21140m_autoconf(dev);
2843 }
2844 break;
2845 }
2846 break;
2847
2848 case SPD_DET:
2849 if (lp->timeout < 0) {
2850 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2851 (~gep_rd(dev) & GEP_LNP));
2852 SET_100Mb_PDET;
2853 }
2854 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2855 next_tick = slnk & ~TIMER_CB;
2856 } else {
2857 if (is_spd_100(dev) && is_100_up(dev)) {
2858 lp->media = _100Mb;
2859 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2860 lp->media = _10Mb;
2861 } else {
2862 lp->media = NC;
2863 }
2864 next_tick = dc21140m_autoconf(dev);
2865 }
2866 break;
2867
2868 case _100Mb:
2869 next_tick = 3000;
2870 if (!lp->tx_enable) {
2871 SET_100Mb;
2872 de4x5_init_connection(dev);
2873 } else {
2874 if (!lp->linkOK && (lp->autosense == AUTO)) {
2875 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2876 lp->media = INIT;
2877 lp->tcount++;
2878 next_tick = DE4X5_AUTOSENSE_MS;
2879 }
2880 }
2881 }
2882 break;
2883
2884 case BNC:
2885 case AUI:
2886 case _10Mb:
2887 next_tick = 3000;
2888 if (!lp->tx_enable) {
2889 SET_10Mb;
2890 de4x5_init_connection(dev);
2891 } else {
2892 if (!lp->linkOK && (lp->autosense == AUTO)) {
2893 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2894 lp->media = INIT;
2895 lp->tcount++;
2896 next_tick = DE4X5_AUTOSENSE_MS;
2897 }
2898 }
2899 }
2900 break;
2901
2902 case NC:
2903 if (lp->media != lp->c_media) {
2904 de4x5_dbg_media(dev);
2905 lp->c_media = lp->media;
2906 }
2907 lp->media = INIT;
2908 lp->tx_enable = false;
2909 break;
2910 }
2911
2912 return next_tick;
2913}
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
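/*
** dc2114x_autoconf() - the corresponding autosense state machine for the
** DC21142/DC21143. Besides the MII-based states it also drives the on-chip
** SIA for the AUI and BNC ports, using ping_media() to check that a test
** frame can actually be sent before declaring the 10Base2 medium usable.
*/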
2929static int
2930dc2114x_autoconf(struct net_device *dev)
2931{
2932 struct de4x5_private *lp = netdev_priv(dev);
2933 u_long iobase = dev->base_addr;
2934 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2935 int next_tick = DE4X5_AUTOSENSE_MS;
2936
2937 switch (lp->media) {
2938 case INIT:
2939 if (lp->timeout < 0) {
2940 DISABLE_IRQs;
2941 lp->tx_enable = false;
2942 lp->linkOK = 0;
2943 lp->timeout = -1;
2944 de4x5_save_skbs(dev);
2945 if (lp->params.autosense & ~AUTO) {
2946 srom_map_media(dev);
2947 if (lp->media != lp->params.autosense) {
2948 lp->tcount++;
2949 lp->media = INIT;
2950 return next_tick;
2951 }
2952 lp->media = INIT;
2953 }
2954 }
2955 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2956 next_tick &= ~TIMER_CB;
2957 } else {
2958 if (lp->autosense == _100Mb) {
2959 lp->media = _100Mb;
2960 } else if (lp->autosense == _10Mb) {
2961 lp->media = _10Mb;
2962 } else if (lp->autosense == TP) {
2963 lp->media = TP;
2964 } else if (lp->autosense == BNC) {
2965 lp->media = BNC;
2966 } else if (lp->autosense == AUI) {
2967 lp->media = AUI;
2968 } else {
2969 lp->media = SPD_DET;
2970 if ((lp->infoblock_media == ANS) &&
2971 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2972 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2973 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2974 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2975 lp->media = ANS;
2976 }
2977 }
2978 lp->local_state = 0;
2979 next_tick = dc2114x_autoconf(dev);
2980 }
2981 break;
2982
2983 case ANS:
2984 switch (lp->local_state) {
2985 case 0:
2986 if (lp->timeout < 0) {
2987 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2988 }
2989 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2990 if (cr < 0) {
2991 next_tick = cr & ~TIMER_CB;
2992 } else {
2993 if (cr) {
2994 lp->local_state = 0;
2995 lp->media = SPD_DET;
2996 } else {
2997 lp->local_state++;
2998 }
2999 next_tick = dc2114x_autoconf(dev);
3000 }
3001 break;
3002
3003 case 1:
3004 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3005 if (sr < 0) {
3006 next_tick = sr & ~TIMER_CB;
3007 } else {
3008 lp->media = SPD_DET;
3009 lp->local_state = 0;
3010 if (sr) {
3011 lp->tmp = MII_SR_ASSC;
3012 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3013 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3014 if (!(anlpa & MII_ANLPA_RF) &&
3015 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3016 if (cap & MII_ANA_100M) {
3017 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3018 lp->media = _100Mb;
3019 } else if (cap & MII_ANA_10M) {
3020 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3021 lp->media = _10Mb;
3022 }
3023 }
3024 }
3025 next_tick = dc2114x_autoconf(dev);
3026 }
3027 break;
3028 }
3029 break;
3030
3031 case AUI:
3032 if (!lp->tx_enable) {
3033 if (lp->timeout < 0) {
3034 omr = inl(DE4X5_OMR);
3035 outl(omr & ~OMR_FDX, DE4X5_OMR);
3036 }
3037 irqs = 0;
3038 irq_mask = 0;
3039            sts = test_media(dev, irqs, irq_mask, 0, 0, 0, 1000);
3040 if (sts < 0) {
3041 next_tick = sts & ~TIMER_CB;
3042 } else {
3043 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3044 lp->media = BNC;
3045 next_tick = dc2114x_autoconf(dev);
3046 } else {
3047 lp->local_state = 1;
3048 de4x5_init_connection(dev);
3049 }
3050 }
3051 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3052 lp->media = AUI_SUSPECT;
3053 next_tick = 3000;
3054 }
3055 break;
3056
3057 case AUI_SUSPECT:
3058 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3059 break;
3060
3061 case BNC:
3062 switch (lp->local_state) {
3063 case 0:
3064 if (lp->timeout < 0) {
3065 omr = inl(DE4X5_OMR);
3066 outl(omr & ~OMR_FDX, DE4X5_OMR);
3067 }
3068 irqs = 0;
3069 irq_mask = 0;
3070                sts = test_media(dev, irqs, irq_mask, 0, 0, 0, 1000);
3071 if (sts < 0) {
3072 next_tick = sts & ~TIMER_CB;
3073 } else {
3074 lp->local_state++;
3075 next_tick = dc2114x_autoconf(dev);
3076 }
3077 break;
3078
3079 case 1:
3080 if (!lp->tx_enable) {
3081 if ((sts = ping_media(dev, 3000)) < 0) {
3082 next_tick = sts & ~TIMER_CB;
3083 } else {
3084 if (sts) {
3085 lp->local_state = 0;
3086 lp->tcount++;
3087 lp->media = INIT;
3088 } else {
3089 de4x5_init_connection(dev);
3090 }
3091 }
3092 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3093 lp->media = BNC_SUSPECT;
3094 next_tick = 3000;
3095 }
3096 break;
3097 }
3098 break;
3099
3100 case BNC_SUSPECT:
3101 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3102 break;
3103
3104 case SPD_DET:
3105 if (srom_map_media(dev) < 0) {
3106 lp->tcount++;
3107 lp->media = INIT;
3108 return next_tick;
3109 }
3110 if (lp->media == _100Mb) {
3111 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3112 lp->media = SPD_DET;
3113 return slnk & ~TIMER_CB;
3114 }
3115 } else {
3116 if (wait_for_link(dev) < 0) {
3117 lp->media = SPD_DET;
3118 return PDET_LINK_WAIT;
3119 }
3120 }
3121 if (lp->media == ANS) {
3122 if (is_spd_100(dev)) {
3123 lp->media = _100Mb;
3124 } else {
3125 lp->media = _10Mb;
3126 }
3127 next_tick = dc2114x_autoconf(dev);
3128 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3129 (((lp->media == _10Mb) || (lp->media == TP) ||
3130 (lp->media == BNC) || (lp->media == AUI)) &&
3131 is_10_up(dev))) {
3132 next_tick = dc2114x_autoconf(dev);
3133 } else {
3134 lp->tcount++;
3135 lp->media = INIT;
3136 }
3137 break;
3138
3139 case _10Mb:
3140 next_tick = 3000;
3141 if (!lp->tx_enable) {
3142 SET_10Mb;
3143 de4x5_init_connection(dev);
3144 } else {
3145 if (!lp->linkOK && (lp->autosense == AUTO)) {
3146 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3147 lp->media = INIT;
3148 lp->tcount++;
3149 next_tick = DE4X5_AUTOSENSE_MS;
3150 }
3151 }
3152 }
3153 break;
3154
3155 case _100Mb:
3156 next_tick = 3000;
3157 if (!lp->tx_enable) {
3158 SET_100Mb;
3159 de4x5_init_connection(dev);
3160 } else {
3161 if (!lp->linkOK && (lp->autosense == AUTO)) {
3162 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3163 lp->media = INIT;
3164 lp->tcount++;
3165 next_tick = DE4X5_AUTOSENSE_MS;
3166 }
3167 }
3168 }
3169 break;
3170
3171 default:
3172 lp->tcount++;
3173        printk("%s: Huh?: media:%02x\n", dev->name, lp->media);
3174 lp->media = INIT;
3175 break;
3176 }
3177
3178 return next_tick;
3179}
3180
3181static int
3182srom_autoconf(struct net_device *dev)
3183{
3184 struct de4x5_private *lp = netdev_priv(dev);
3185
3186 return lp->infoleaf_fn(dev);
3187}
3188
3189
3190
3191
3192
3193
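/*
** srom_map_media() - translate the SROM info block media code in
** lp->infoblock_media into the driver's internal media type, honouring the
** "fdx" module parameter. Returns 0 on success and -1 when the requested
** duplex cannot be provided on that medium or the code is unknown.
*/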
3194static int
3195srom_map_media(struct net_device *dev)
3196{
3197 struct de4x5_private *lp = netdev_priv(dev);
3198
3199 lp->fdx = false;
3200 if (lp->infoblock_media == lp->media)
3201 return 0;
3202
3203 switch(lp->infoblock_media) {
3204 case SROM_10BASETF:
3205 if (!lp->params.fdx) return -1;
3206        lp->fdx = true;         /* fall through */
3207 case SROM_10BASET:
3208 if (lp->params.fdx && !lp->fdx) return -1;
3209 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3210 lp->media = _10Mb;
3211 } else {
3212 lp->media = TP;
3213 }
3214 break;
3215
3216 case SROM_10BASE2:
3217 lp->media = BNC;
3218 break;
3219
3220 case SROM_10BASE5:
3221 lp->media = AUI;
3222 break;
3223
3224 case SROM_100BASETF:
3225 if (!lp->params.fdx) return -1;
3226        lp->fdx = true;         /* fall through */
3227 case SROM_100BASET:
3228 if (lp->params.fdx && !lp->fdx) return -1;
3229 lp->media = _100Mb;
3230 break;
3231
3232 case SROM_100BASET4:
3233 lp->media = _100Mb;
3234 break;
3235
3236 case SROM_100BASEFF:
3237 if (!lp->params.fdx) return -1;
3238        lp->fdx = true;         /* fall through */
3239 case SROM_100BASEF:
3240 if (lp->params.fdx && !lp->fdx) return -1;
3241 lp->media = _100Mb;
3242 break;
3243
3244 case ANS:
3245 lp->media = ANS;
3246 lp->fdx = lp->params.fdx;
3247 break;
3248
3249 default:
3250 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3251 lp->infoblock_media);
3252 return -1;
3253 }
3254
3255 return 0;
3256}
3257
3258static void
3259de4x5_init_connection(struct net_device *dev)
3260{
3261 struct de4x5_private *lp = netdev_priv(dev);
3262 u_long iobase = dev->base_addr;
3263 u_long flags = 0;
3264
3265 if (lp->media != lp->c_media) {
3266 de4x5_dbg_media(dev);
3267 lp->c_media = lp->media;
3268 }
3269
3270 spin_lock_irqsave(&lp->lock, flags);
3271 de4x5_rst_desc_ring(dev);
3272 de4x5_setup_intr(dev);
3273 lp->tx_enable = true;
3274 spin_unlock_irqrestore(&lp->lock, flags);
3275 outl(POLL_DEMAND, DE4X5_TPD);
3276
3277 netif_wake_queue(dev);
3278}
3279
3280
3281
3282
3283
3284
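/*
** de4x5_reset_phy() - reset the attached PHY, either by replaying the SROM
** reset sequence or by toggling the general purpose port (PHY_HARD_RESET),
** then, for MII PHYs, poll MII_CR until the reset bit clears. Returns the
** value of that poll (which may carry TIMER_CB), or 0 when there is
** nothing to wait for.
*/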
3285static int
3286de4x5_reset_phy(struct net_device *dev)
3287{
3288 struct de4x5_private *lp = netdev_priv(dev);
3289 u_long iobase = dev->base_addr;
3290 int next_tick = 0;
3291
3292 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3293 if (lp->timeout < 0) {
3294 if (lp->useSROM) {
3295 if (lp->phy[lp->active].rst) {
3296 srom_exec(dev, lp->phy[lp->active].rst);
3297 srom_exec(dev, lp->phy[lp->active].rst);
3298 } else if (lp->rst) {
3299 srom_exec(dev, lp->rst);
3300 srom_exec(dev, lp->rst);
3301 }
3302 } else {
3303 PHY_HARD_RESET;
3304 }
3305 if (lp->useMII) {
3306 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3307 }
3308 }
3309 if (lp->useMII) {
3310 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3311 }
3312 } else if (lp->chipset == DC21140) {
3313 PHY_HARD_RESET;
3314 }
3315
3316 return next_tick;
3317}
3318
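/*
** test_media() - (re)program the SIA with the given CSR13-15 values on
** non-SROM boards, unmask the requested link interrupts, then poll the
** status register in 100ms steps for up to msec ms, returning the status
** bits seen or a TIMER_CB continuation while still waiting.
*/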
3319static int
3320test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3321{
3322 struct de4x5_private *lp = netdev_priv(dev);
3323 u_long iobase = dev->base_addr;
3324 s32 sts, csr12;
3325
3326 if (lp->timeout < 0) {
3327 lp->timeout = msec/100;
3328 if (!lp->useSROM) {
3329 reset_init_sia(dev, csr13, csr14, csr15);
3330 }
3331
3332
3333 outl(irq_mask, DE4X5_IMR);
3334
3335
3336 sts = inl(DE4X5_STS);
3337 outl(sts, DE4X5_STS);
3338
3339
3340 if ((lp->chipset == DC21041) || lp->useSROM) {
3341 csr12 = inl(DE4X5_SISR);
3342 outl(csr12, DE4X5_SISR);
3343 }
3344 }
3345
3346 sts = inl(DE4X5_STS) & ~TIMER_CB;
3347
3348 if (!(sts & irqs) && --lp->timeout) {
3349 sts = 100 | TIMER_CB;
3350 } else {
3351 lp->timeout = -1;
3352 }
3353
3354 return sts;
3355}
3356
3357static int
3358test_tp(struct net_device *dev, s32 msec)
3359{
3360 struct de4x5_private *lp = netdev_priv(dev);
3361 u_long iobase = dev->base_addr;
3362 int sisr;
3363
3364 if (lp->timeout < 0) {
3365 lp->timeout = msec/100;
3366 }
3367
3368 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3369
3370 if (sisr && --lp->timeout) {
3371 sisr = 100 | TIMER_CB;
3372 } else {
3373 lp->timeout = -1;
3374 }
3375
3376 return sisr;
3377}
3378
3379
3380
3381
3382
3383
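/*
** test_for_100Mb() - sample the 100Mb/s link indication (via the MII/SROM
** helpers, or the general purpose port on older boards) every
** SAMPLE_INTERVAL ms, after an initial SAMPLE_DELAY when a long timeout
** was requested.
*/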
3384#define SAMPLE_INTERVAL 500
3385#define SAMPLE_DELAY 2000
3386static int
3387test_for_100Mb(struct net_device *dev, int msec)
3388{
3389 struct de4x5_private *lp = netdev_priv(dev);
3390    int gep = 0, ret = ((lp->chipset & ~0x00ff) == DC2114x ? -1 : GEP_SLNK);
3391
3392 if (lp->timeout < 0) {
3393 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3394 if (msec > SAMPLE_DELAY) {
3395 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3396 gep = SAMPLE_DELAY | TIMER_CB;
3397 return gep;
3398 } else {
3399 lp->timeout = msec/SAMPLE_INTERVAL;
3400 }
3401 }
3402
3403 if (lp->phy[lp->active].id || lp->useSROM) {
3404 gep = is_100_up(dev) | is_spd_100(dev);
3405 } else {
3406 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3407 }
3408 if (!(gep & ret) && --lp->timeout) {
3409 gep = SAMPLE_INTERVAL | TIMER_CB;
3410 } else {
3411 lp->timeout = -1;
3412 }
3413
3414 return gep;
3415}
3416
3417static int
3418wait_for_link(struct net_device *dev)
3419{
3420 struct de4x5_private *lp = netdev_priv(dev);
3421
3422 if (lp->timeout < 0) {
3423 lp->timeout = 1;
3424 }
3425
3426 if (lp->timeout--) {
3427 return TIMER_CB;
3428 } else {
3429 lp->timeout = -1;
3430 }
3431
3432 return 0;
3433}
3434
3435
3436
3437
3438
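/*
** test_mii_reg() - poll an MII register until the bits in 'mask' reach the
** polarity given by 'pol' (true waits for them to set, false for them to
** clear), checking every 100ms for up to msec ms.
*/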
3439static int
3440test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3441{
3442 struct de4x5_private *lp = netdev_priv(dev);
3443 int test;
3444 u_long iobase = dev->base_addr;
3445
3446 if (lp->timeout < 0) {
3447 lp->timeout = msec/100;
3448 }
3449
3450 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3451 test = (reg ^ (pol ? ~0 : 0)) & mask;
3452
3453 if (test && --lp->timeout) {
3454 reg = 100 | TIMER_CB;
3455 } else {
3456 lp->timeout = -1;
3457 }
3458
3459 return reg;
3460}
3461
3462static int
3463is_spd_100(struct net_device *dev)
3464{
3465 struct de4x5_private *lp = netdev_priv(dev);
3466 u_long iobase = dev->base_addr;
3467 int spd;
3468
3469 if (lp->useMII) {
3470 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3471 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3472 spd &= lp->phy[lp->active].spd.mask;
3473 } else if (!lp->useSROM) {
3474 spd = ((~gep_rd(dev)) & GEP_SLNK);
3475 } else {
3476 if ((lp->ibn == 2) || !lp->asBitValid)
3477 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3478
3479 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3480 (lp->linkOK & ~lp->asBitValid);
3481 }
3482
3483 return spd;
3484}
3485
3486static int
3487is_100_up(struct net_device *dev)
3488{
3489 struct de4x5_private *lp = netdev_priv(dev);
3490 u_long iobase = dev->base_addr;
3491
3492 if (lp->useMII) {
3493
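        /* Read the status register twice: the link status bit is latched,
           so the first read may still report a failure that has cleared. */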
3494 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3495 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3496 } else if (!lp->useSROM) {
3497 return (~gep_rd(dev)) & GEP_SLNK;
3498 } else {
3499 if ((lp->ibn == 2) || !lp->asBitValid)
3500 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3501
3502 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3503 (lp->linkOK & ~lp->asBitValid);
3504 }
3505}
3506
3507static int
3508is_10_up(struct net_device *dev)
3509{
3510 struct de4x5_private *lp = netdev_priv(dev);
3511 u_long iobase = dev->base_addr;
3512
3513 if (lp->useMII) {
3514
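        /* Read the status register twice: the link status bit is latched,
           so the first read may still report a failure that has cleared. */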
3515 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3516 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3517 } else if (!lp->useSROM) {
3518 return (~gep_rd(dev)) & GEP_LNP;
3519 } else {
3520 if ((lp->ibn == 2) || !lp->asBitValid)
3521 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3522 (~inl(DE4X5_SISR)&SISR_LS10):
3523 0;
3524
3525 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3526 (lp->linkOK & ~lp->asBitValid);
3527 }
3528}
3529
3530static int
3531is_anc_capable(struct net_device *dev)
3532{
3533 struct de4x5_private *lp = netdev_priv(dev);
3534 u_long iobase = dev->base_addr;
3535
3536 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3537 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3538 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3539 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3540 } else {
3541 return 0;
3542 }
3543}
3544
3545
3546
3547
3548
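/*
** ping_media() - queue a small self-addressed test frame (lp->frame, see
** create_packet()) and watch the SIA "no carrier" indication and the
** transmit descriptor status to decide whether the AUI/BNC medium works.
** Returns 0 on success, 1 on failure, or a TIMER_CB continuation while
** the test is still running.
*/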
3549static int
3550ping_media(struct net_device *dev, int msec)
3551{
3552 struct de4x5_private *lp = netdev_priv(dev);
3553 u_long iobase = dev->base_addr;
3554 int sisr;
3555
3556 if (lp->timeout < 0) {
3557 lp->timeout = msec/100;
3558
3559 lp->tmp = lp->tx_new;
3560 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3561 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3562 outl(POLL_DEMAND, DE4X5_TPD);
3563 }
3564
3565 sisr = inl(DE4X5_SISR);
3566
3567 if ((!(sisr & SISR_NCR)) &&
3568 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3569 (--lp->timeout)) {
3570 sisr = 100 | TIMER_CB;
3571 } else {
3572 if ((!(sisr & SISR_NCR)) &&
3573 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3574 lp->timeout) {
3575 sisr = 0;
3576 } else {
3577 sisr = 1;
3578 }
3579 lp->timeout = -1;
3580 }
3581
3582 return sisr;
3583}
3584
3585
3586
3587
3588
3589
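/*
** de4x5_alloc_rx_buff() - refill one receive descriptor. On platforms that
** can DMA to any address this swaps a freshly allocated, suitably aligned
** skb into the ring and returns the old one to the caller; on the rest
** (or with DE4X5_DO_MEMCPY) the packet is copied out of the fixed receive
** buffers into a new skb instead.
*/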
3590static struct sk_buff *
3591de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3592{
3593 struct de4x5_private *lp = netdev_priv(dev);
3594 struct sk_buff *p;
3595
3596#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3597 struct sk_buff *ret;
3598 u_long i=0, tmp;
3599
3600 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3601 if (!p) return NULL;
3602
3603 tmp = virt_to_bus(p->data);
3604 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3605 skb_reserve(p, i);
3606 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3607
3608 ret = lp->rx_skb[index];
3609 lp->rx_skb[index] = p;
3610
3611 if ((u_long) ret > 1) {
3612 skb_put(ret, len);
3613 }
3614
3615 return ret;
3616
3617#else
3618 if (lp->state != OPEN) return (struct sk_buff *)1;
3619
3620 p = netdev_alloc_skb(dev, len + 2);
3621 if (!p) return NULL;
3622
3623 skb_reserve(p, 2);
3624 if (index < lp->rx_old) {
3625 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3626 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
3627 skb_put_data(p, lp->rx_bufs, len - tlen);
3628 } else {
3629 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
3630 }
3631
3632 return p;
3633#endif
3634}
3635
3636static void
3637de4x5_free_rx_buffs(struct net_device *dev)
3638{
3639 struct de4x5_private *lp = netdev_priv(dev);
3640 int i;
3641
3642 for (i=0; i<lp->rxRingSize; i++) {
3643 if ((u_long) lp->rx_skb[i] > 1) {
3644 dev_kfree_skb(lp->rx_skb[i]);
3645 }
3646 lp->rx_ring[i].status = 0;
3647 lp->rx_skb[i] = (struct sk_buff *)1;
3648 }
3649}
3650
3651static void
3652de4x5_free_tx_buffs(struct net_device *dev)
3653{
3654 struct de4x5_private *lp = netdev_priv(dev);
3655 int i;
3656
3657 for (i=0; i<lp->txRingSize; i++) {
3658 if (lp->tx_skb[i])
3659 de4x5_free_tx_buff(lp, i);
3660 lp->tx_ring[i].status = 0;
3661 }
3662
3663
3664 __skb_queue_purge(&lp->cache.queue);
3665}
3666
3667
3668
3669
3670
3671
3672
3673
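/*
** de4x5_save_skbs() and de4x5_rst_desc_ring() bracket a media change: the
** first stops the chip, reaps the pending transmit buffers and takes a
** software reset while preserving CSR0/6/7 via de4x5_cache_state(); the
** second reloads the descriptor ring base addresses and re-initialises the
** rings (receive entries handed back to the chip, transmit entries
** cleared) before restarting the chip.
*/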
3674static void
3675de4x5_save_skbs(struct net_device *dev)
3676{
3677 struct de4x5_private *lp = netdev_priv(dev);
3678 u_long iobase = dev->base_addr;
3679 s32 omr;
3680
3681 if (!lp->cache.save_cnt) {
3682 STOP_DE4X5;
3683 de4x5_tx(dev);
3684 de4x5_free_tx_buffs(dev);
3685 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3686 de4x5_sw_reset(dev);
3687 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3688 lp->cache.save_cnt++;
3689 START_DE4X5;
3690 }
3691}
3692
3693static void
3694de4x5_rst_desc_ring(struct net_device *dev)
3695{
3696 struct de4x5_private *lp = netdev_priv(dev);
3697 u_long iobase = dev->base_addr;
3698 int i;
3699 s32 omr;
3700
3701 if (lp->cache.save_cnt) {
3702 STOP_DE4X5;
3703 outl(lp->dma_rings, DE4X5_RRBA);
3704 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3705 DE4X5_TRBA);
3706
3707 lp->rx_new = lp->rx_old = 0;
3708 lp->tx_new = lp->tx_old = 0;
3709
3710 for (i = 0; i < lp->rxRingSize; i++) {
3711 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3712 }
3713
3714 for (i = 0; i < lp->txRingSize; i++) {
3715 lp->tx_ring[i].status = cpu_to_le32(0);
3716 }
3717
3718 barrier();
3719 lp->cache.save_cnt--;
3720 START_DE4X5;
3721 }
3722}
3723
3724static void
3725de4x5_cache_state(struct net_device *dev, int flag)
3726{
3727 struct de4x5_private *lp = netdev_priv(dev);
3728 u_long iobase = dev->base_addr;
3729
3730 switch(flag) {
3731 case DE4X5_SAVE_STATE:
3732 lp->cache.csr0 = inl(DE4X5_BMR);
3733 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3734 lp->cache.csr7 = inl(DE4X5_IMR);
3735 break;
3736
3737 case DE4X5_RESTORE_STATE:
3738 outl(lp->cache.csr0, DE4X5_BMR);
3739 outl(lp->cache.csr6, DE4X5_OMR);
3740 outl(lp->cache.csr7, DE4X5_IMR);
3741 if (lp->chipset == DC21140) {
3742 gep_wr(lp->cache.gepc, dev);
3743 gep_wr(lp->cache.gep, dev);
3744 } else {
3745 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3746 lp->cache.csr15);
3747 }
3748 break;
3749 }
3750}
3751
3752static void
3753de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3754{
3755 struct de4x5_private *lp = netdev_priv(dev);
3756
3757 __skb_queue_tail(&lp->cache.queue, skb);
3758}
3759
3760static void
3761de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3762{
3763 struct de4x5_private *lp = netdev_priv(dev);
3764
3765 __skb_queue_head(&lp->cache.queue, skb);
3766}
3767
3768static struct sk_buff *
3769de4x5_get_cache(struct net_device *dev)
3770{
3771 struct de4x5_private *lp = netdev_priv(dev);
3772
3773 return __skb_dequeue(&lp->cache.queue);
3774}
3775
3776
3777
3778
3779
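/*
** test_ans() - wait for the SIA autonegotiation state field to reach the
** ANS_NWOK state or for one of the requested link interrupts, polling
** every 100ms for up to msec ms.
*/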
3780static int
3781test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3782{
3783 struct de4x5_private *lp = netdev_priv(dev);
3784 u_long iobase = dev->base_addr;
3785 s32 sts, ans;
3786
3787 if (lp->timeout < 0) {
3788 lp->timeout = msec/100;
3789 outl(irq_mask, DE4X5_IMR);
3790
3791
3792 sts = inl(DE4X5_STS);
3793 outl(sts, DE4X5_STS);
3794 }
3795
3796 ans = inl(DE4X5_SISR) & SISR_ANS;
3797 sts = inl(DE4X5_STS) & ~TIMER_CB;
3798
3799 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3800 sts = 100 | TIMER_CB;
3801 } else {
3802 lp->timeout = -1;
3803 }
3804
3805 return sts;
3806}
3807
3808static void
3809de4x5_setup_intr(struct net_device *dev)
3810{
3811 struct de4x5_private *lp = netdev_priv(dev);
3812 u_long iobase = dev->base_addr;
3813 s32 imr, sts;
3814
3815 if (inl(DE4X5_OMR) & OMR_SR) {
3816 imr = 0;
3817 UNMASK_IRQs;
3818 sts = inl(DE4X5_STS);
3819 outl(sts, DE4X5_STS);
3820 ENABLE_IRQs;
3821 }
3822}
3823
3824
3825
3826
3827static void
3828reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3829{
3830 struct de4x5_private *lp = netdev_priv(dev);
3831 u_long iobase = dev->base_addr;
3832
3833 RESET_SIA;
3834 if (lp->useSROM) {
3835 if (lp->ibn == 3) {
3836 srom_exec(dev, lp->phy[lp->active].rst);
3837 srom_exec(dev, lp->phy[lp->active].gep);
3838 outl(1, DE4X5_SICR);
3839 return;
3840 } else {
3841 csr15 = lp->cache.csr15;
3842 csr14 = lp->cache.csr14;
3843 csr13 = lp->cache.csr13;
3844 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3845 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3846 }
3847 } else {
3848 outl(csr15, DE4X5_SIGR);
3849 }
3850 outl(csr14, DE4X5_STRR);
3851 outl(csr13, DE4X5_SICR);
3852
3853 mdelay(10);
3854}
3855
3856
3857
3858
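/*
** create_packet() - build the minimal self-addressed test frame used by
** the media ping: destination and source are both this interface's
** address and the length field is set to 0x0001.
*/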
3859static void
3860create_packet(struct net_device *dev, char *frame, int len)
3861{
3862 int i;
3863 char *buf = frame;
3864
3865 for (i=0; i<ETH_ALEN; i++) {
3866 *buf++ = dev->dev_addr[i];
3867 }
3868 for (i=0; i<ETH_ALEN; i++) {
3869 *buf++ = dev->dev_addr[i];
3870 }
3871
3872 *buf++ = 0;
3873 *buf++ = 1;
3874}
3875
3876
3877
3878
3879static int
3880EISA_signature(char *name, struct device *device)
3881{
3882 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3883 struct eisa_device *edev;
3884
3885 *name = '\0';
3886 edev = to_eisa_device (device);
3887 i = edev->id.driver_data;
3888
3889 if (i >= 0 && i < siglen) {
3890 strcpy (name, de4x5_signatures[i]);
3891 status = 1;
3892 }
3893
3894 return status;
3895}
3896
3897
3898
3899
3900static int
3901PCI_signature(char *name, struct de4x5_private *lp)
3902{
3903 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3904
3905 if (lp->chipset == DC21040) {
3906 strcpy(name, "DE434/5");
3907 return status;
3908 } else {
3909 int tmp = *((char *)&lp->srom + 19) * 3;
3910 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3911 }
3912 name[8] = '\0';
3913 for (i=0; i<siglen; i++) {
3914 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3915 }
3916 if (i == siglen) {
3917 if (dec_only) {
3918 *name = '\0';
3919 } else {
3920 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3921 ((lp->chipset == DC21041) ? "DC21041" :
3922 ((lp->chipset == DC21140) ? "DC21140" :
3923 ((lp->chipset == DC21142) ? "DC21142" :
3924 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3925 )))))));
3926 }
3927 if (lp->chipset != DC21041) {
3928 lp->useSROM = true;
3929 }
3930 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3931 lp->useSROM = true;
3932 }
3933
3934 return status;
3935}
3936
3937
3938
3939
3940
3941
3942
3943
3944
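/*
** DevicePresent() - probe for the Ethernet address ROM. On the DC21040 the
** address ROM pointer is simply reset; on later chips the whole serial ROM
** is read into lp->srom, bailing out early if the hardware address words
** read back as all zeros or all ones.
*/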
3945static void
3946DevicePresent(struct net_device *dev, u_long aprom_addr)
3947{
3948 int i, j=0;
3949 struct de4x5_private *lp = netdev_priv(dev);
3950
3951 if (lp->chipset == DC21040) {
3952 if (lp->bus == EISA) {
3953 enet_addr_rst(aprom_addr);
3954 } else {
3955 outl(0, aprom_addr);
3956 }
3957 } else {
3958 u_short tmp;
3959 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3960 for (i=0; i<(ETH_ALEN>>1); i++) {
3961 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3962 j += tmp;
3963 *p = cpu_to_le16(tmp);
3964 }
3965 if (j == 0 || j == 3 * 0xffff) {
3966
3967 return;
3968 }
3969
3970 p = (__le16 *)&lp->srom;
3971 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3972 tmp = srom_rd(aprom_addr, i);
3973 *p++ = cpu_to_le16(tmp);
3974 }
3975 de4x5_dbg_srom(&lp->srom);
3976 }
3977}
3978
3979
3980
3981
3982
3983
3984static void
3985enet_addr_rst(u_long aprom_addr)
3986{
3987 union {
3988 struct {
3989 u32 a;
3990 u32 b;
3991 } llsig;
3992 char Sig[sizeof(u32) << 1];
3993 } dev;
3994 short sigLength=0;
3995 s8 data;
3996 int i, j;
3997
3998 dev.llsig.a = ETH_PROM_SIG;
3999 dev.llsig.b = ETH_PROM_SIG;
4000 sigLength = sizeof(u32) << 1;
4001
4002 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4003 data = inb(aprom_addr);
4004 if (dev.Sig[j] == data) {
4005 j++;
4006 } else {
4007 if (data == dev.Sig[0]) {
4008 j=1;
4009 } else {
4010 j=0;
4011 }
4012 }
4013 }
4014}
4015
4016
4017
4018
4019
4020
4021
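/*
** get_hw_addr() - extract the station address from the address ROM or the
** SROM image (with workarounds for known bad SMC/Accton SROMs), verify the
** simple rotating checksum, and on PowerMacs undo the bit-reversed byte
** order that the on-board ROM appears to use.
*/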
4022static int
4023get_hw_addr(struct net_device *dev)
4024{
4025 u_long iobase = dev->base_addr;
4026 int broken, i, k, tmp, status = 0;
4027 u_short j,chksum;
4028 struct de4x5_private *lp = netdev_priv(dev);
4029
4030 broken = de4x5_bad_srom(lp);
4031
4032 for (i=0,k=0,j=0;j<3;j++) {
4033 k <<= 1;
4034 if (k > 0xffff) k-=0xffff;
4035
4036 if (lp->bus == PCI) {
4037 if (lp->chipset == DC21040) {
4038 while ((tmp = inl(DE4X5_APROM)) < 0);
4039 k += (u_char) tmp;
4040 dev->dev_addr[i++] = (u_char) tmp;
4041 while ((tmp = inl(DE4X5_APROM)) < 0);
4042 k += (u_short) (tmp << 8);
4043 dev->dev_addr[i++] = (u_char) tmp;
4044 } else if (!broken) {
4045 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4046 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4047 } else if ((broken == SMC) || (broken == ACCTON)) {
4048 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4049 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4050 }
4051 } else {
4052 k += (u_char) (tmp = inb(EISA_APROM));
4053 dev->dev_addr[i++] = (u_char) tmp;
4054 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4055 dev->dev_addr[i++] = (u_char) tmp;
4056 }
4057
4058 if (k > 0xffff) k-=0xffff;
4059 }
4060 if (k == 0xffff) k=0;
4061
4062 if (lp->bus == PCI) {
4063 if (lp->chipset == DC21040) {
4064 while ((tmp = inl(DE4X5_APROM)) < 0);
4065 chksum = (u_char) tmp;
4066 while ((tmp = inl(DE4X5_APROM)) < 0);
4067 chksum |= (u_short) (tmp << 8);
4068 if ((k != chksum) && (dec_only)) status = -1;
4069 }
4070 } else {
4071 chksum = (u_char) inb(EISA_APROM);
4072 chksum |= (u_short) (inb(EISA_APROM) << 8);
4073 if ((k != chksum) && (dec_only)) status = -1;
4074 }
4075
4076
4077 srom_repair(dev, broken);
4078
4079#ifdef CONFIG_PPC_PMAC
4080
4081
4082
4083
4084 if ( machine_is(powermac) &&
4085 (dev->dev_addr[0] == 0) &&
4086 (dev->dev_addr[1] == 0xa0) )
4087 {
4088 for (i = 0; i < ETH_ALEN; ++i)
4089 {
4090 int x = dev->dev_addr[i];
4091 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4092 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4093 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4094 }
4095 }
4096#endif
4097
4098
4099 status = test_bad_enet(dev, status);
4100
4101 return status;
4102}
4103
4104
4105
4106
4107static int
4108de4x5_bad_srom(struct de4x5_private *lp)
4109{
4110 int i, status = 0;
4111
4112 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4113 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4114 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4115 if (i == 0) {
4116 status = SMC;
4117 } else if (i == 1) {
4118 status = ACCTON;
4119 }
4120 break;
4121 }
4122 }
4123
4124 return status;
4125}
4126
4127static void
4128srom_repair(struct net_device *dev, int card)
4129{
4130 struct de4x5_private *lp = netdev_priv(dev);
4131
4132 switch(card) {
4133 case SMC:
4134 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4135 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4136 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4137 lp->useSROM = true;
4138 break;
4139 }
4140}
4141
4142
4143
4144
4145
4146static int
4147test_bad_enet(struct net_device *dev, int status)
4148{
4149 struct de4x5_private *lp = netdev_priv(dev);
4150 int i, tmp;
4151
4152 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4153 if ((tmp == 0) || (tmp == 0x5fa)) {
4154 if ((lp->chipset == last.chipset) &&
4155 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4156 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4157 for (i=ETH_ALEN-1; i>2; --i) {
4158 dev->dev_addr[i] += 1;
4159 if (dev->dev_addr[i] != 0) break;
4160 }
4161 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4162 if (!an_exception(lp)) {
4163 dev->irq = last.irq;
4164 }
4165
4166 status = 0;
4167 }
4168 } else if (!status) {
4169 last.chipset = lp->chipset;
4170 last.bus = lp->bus_num;
4171 last.irq = dev->irq;
4172 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4173 }
4174
4175 return status;
4176}
4177
4178
4179
4180
4181static int
4182an_exception(struct de4x5_private *lp)
4183{
4184 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4185 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4186 return -1;
4187 }
4188
4189 return 0;
4190}
4191
4192
4193
4194
4195static short
4196srom_rd(u_long addr, u_char offset)
4197{
4198 sendto_srom(SROM_RD | SROM_SR, addr);
4199
4200 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4201 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4202 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4203
4204 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4205}
4206
4207static void
4208srom_latch(u_int command, u_long addr)
4209{
4210 sendto_srom(command, addr);
4211 sendto_srom(command | DT_CLK, addr);
4212 sendto_srom(command, addr);
4213}
4214
4215static void
4216srom_command(u_int command, u_long addr)
4217{
4218 srom_latch(command, addr);
4219 srom_latch(command, addr);
4220 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4221}
4222
4223static void
4224srom_address(u_int command, u_long addr, u_char offset)
4225{
4226 int i, a;
4227
4228 a = offset << 2;
4229 for (i=0; i<6; i++, a <<= 1) {
4230 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4231 }
4232 udelay(1);
4233
4234    getfrom_srom(addr);         /* the EEPROM clocks out a dummy bit here; its value is not needed */
4235}
4236
4237static short
4238srom_data(u_int command, u_long addr)
4239{
4240 int i;
4241 short word = 0;
4242 s32 tmp;
4243
4244 for (i=0; i<16; i++) {
4245 sendto_srom(command | DT_CLK, addr);
4246 tmp = getfrom_srom(addr);
4247 sendto_srom(command, addr);
4248
4249 word = (word << 1) | ((tmp >> 3) & 0x01);
4250 }
4251
4252 sendto_srom(command & 0x0000ff00, addr);
4253
4254 return word;
4255}
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
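/*
** Low level serial ROM access primitives: each write to or read from the
** SROM/MII control register is followed by a 1us delay to respect the
** EEPROM timing.
*/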
4271static void
4272sendto_srom(u_int command, u_long addr)
4273{
4274 outl(command, addr);
4275 udelay(1);
4276}
4277
4278static int
4279getfrom_srom(u_long addr)
4280{
4281 s32 tmp;
4282
4283 tmp = inl(addr);
4284 udelay(1);
4285
4286 return tmp;
4287}
4288
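/*
** srom_infoleaf_info() - locate this chip's info leaf in the SROM: pick
** the decode function from infoleaf_array[] by chipset, then, when the
** SROM describes more than one controller, find this device's entry and
** record its leaf offset.
*/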
4289static int
4290srom_infoleaf_info(struct net_device *dev)
4291{
4292 struct de4x5_private *lp = netdev_priv(dev);
4293 int i, count;
4294 u_char *p;
4295
4296
4297 for (i=0; i<INFOLEAF_SIZE; i++) {
4298 if (lp->chipset == infoleaf_array[i].chipset) break;
4299 }
4300 if (i == INFOLEAF_SIZE) {
4301 lp->useSROM = false;
4302 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4303 dev->name);
4304 return -ENXIO;
4305 }
4306
4307 lp->infoleaf_fn = infoleaf_array[i].fn;
4308
4309
4310 count = *((u_char *)&lp->srom + 19);
4311 p = (u_char *)&lp->srom + 26;
4312
4313 if (count > 1) {
4314 for (i=count; i; --i, p+=3) {
4315 if (lp->device == *p) break;
4316 }
4317 if (i == 0) {
4318 lp->useSROM = false;
4319 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4320 dev->name, lp->device);
4321 return -ENXIO;
4322 }
4323 }
4324
4325 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4326
4327 return 0;
4328}
4329
4330
4331
4332
4333
4334
4335
4336
4337static void
4338srom_init(struct net_device *dev)
4339{
4340 struct de4x5_private *lp = netdev_priv(dev);
4341 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4342 u_char count;
4343
4344 p+=2;
4345 if (lp->chipset == DC21140) {
4346 lp->cache.gepc = (*p++ | GEP_CTRL);
4347 gep_wr(lp->cache.gepc, dev);
4348 }
4349
4350
4351 count = *p++;
4352
4353
4354 for (;count; --count) {
4355 if (*p < 128) {
4356 p += COMPACT_LEN;
4357 } else if (*(p+1) == 5) {
4358 type5_infoblock(dev, 1, p);
4359 p += ((*p & BLOCK_LEN) + 1);
4360 } else if (*(p+1) == 4) {
4361 p += ((*p & BLOCK_LEN) + 1);
4362 } else if (*(p+1) == 3) {
4363 type3_infoblock(dev, 1, p);
4364 p += ((*p & BLOCK_LEN) + 1);
4365 } else if (*(p+1) == 2) {
4366 p += ((*p & BLOCK_LEN) + 1);
4367 } else if (*(p+1) == 1) {
4368 type1_infoblock(dev, 1, p);
4369 p += ((*p & BLOCK_LEN) + 1);
4370 } else {
4371 p += ((*p & BLOCK_LEN) + 1);
4372 }
4373 }
4374}
4375
4376
4377
4378
4379
4380static void
4381srom_exec(struct net_device *dev, u_char *p)
4382{
4383 struct de4x5_private *lp = netdev_priv(dev);
4384 u_long iobase = dev->base_addr;
4385 u_char count = (p ? *p++ : 0);
4386 u_short *w = (u_short *)p;
4387
4388 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4389
4390 if (lp->chipset != DC21140) RESET_SIA;
4391
4392 while (count--) {
4393 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4394 *p++ : get_unaligned_le16(w++)), dev);
4395 mdelay(2);
4396 }
4397
4398 if (lp->chipset != DC21140) {
4399 outl(lp->cache.csr14, DE4X5_STRR);
4400 outl(lp->cache.csr13, DE4X5_SICR);
4401 }
4402}
4403
4404
4405
4406
4407
4408
4409static int
4410dc21041_infoleaf(struct net_device *dev)
4411{
4412 return DE4X5_AUTOSENSE_MS;
4413}
4414
4415static int
4416dc21140_infoleaf(struct net_device *dev)
4417{
4418 struct de4x5_private *lp = netdev_priv(dev);
4419 u_char count = 0;
4420 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4421 int next_tick = DE4X5_AUTOSENSE_MS;
4422
4423
4424 p+=2;
4425
4426
4427 lp->cache.gepc = (*p++ | GEP_CTRL);
4428
4429
4430 count = *p++;
4431
4432
4433 if (*p < 128) {
4434 next_tick = dc_infoblock[COMPACT](dev, count, p);
4435 } else {
4436 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4437 }
4438
4439 if (lp->tcount == count) {
4440 lp->media = NC;
4441 if (lp->media != lp->c_media) {
4442 de4x5_dbg_media(dev);
4443 lp->c_media = lp->media;
4444 }
4445 lp->media = INIT;
4446 lp->tcount = 0;
4447 lp->tx_enable = false;
4448 }
4449
4450 return next_tick & ~TIMER_CB;
4451}
4452
4453static int
4454dc21142_infoleaf(struct net_device *dev)
4455{
4456 struct de4x5_private *lp = netdev_priv(dev);
4457 u_char count = 0;
4458 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4459 int next_tick = DE4X5_AUTOSENSE_MS;
4460
4461
4462 p+=2;
4463
4464
4465 count = *p++;
4466
4467
4468 if (*p < 128) {
4469 next_tick = dc_infoblock[COMPACT](dev, count, p);
4470 } else {
4471 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4472 }
4473
4474 if (lp->tcount == count) {
4475 lp->media = NC;
4476 if (lp->media != lp->c_media) {
4477 de4x5_dbg_media(dev);
4478 lp->c_media = lp->media;
4479 }
4480 lp->media = INIT;
4481 lp->tcount = 0;
4482 lp->tx_enable = false;
4483 }
4484
4485 return next_tick & ~TIMER_CB;
4486}
4487
4488static int
4489dc21143_infoleaf(struct net_device *dev)
4490{
4491 struct de4x5_private *lp = netdev_priv(dev);
4492 u_char count = 0;
4493 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4494 int next_tick = DE4X5_AUTOSENSE_MS;
4495
4496
4497 p+=2;
4498
4499
4500 count = *p++;
4501
4502
4503 if (*p < 128) {
4504 next_tick = dc_infoblock[COMPACT](dev, count, p);
4505 } else {
4506 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4507 }
4508 if (lp->tcount == count) {
4509 lp->media = NC;
4510 if (lp->media != lp->c_media) {
4511 de4x5_dbg_media(dev);
4512 lp->c_media = lp->media;
4513 }
4514 lp->media = INIT;
4515 lp->tcount = 0;
4516 lp->tx_enable = false;
4517 }
4518
4519 return next_tick & ~TIMER_CB;
4520}
4521
4522
4523
4524
4525
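/*
** The *_infoblock() decoders below each handle one SROM media info block
** format (compact and types 0-5). Each first skips forward to the block
** selected by the current test count, then extracts the media code, GEP
** sequences and CSR values before re-entering the appropriate autoconf
** state machine.
*/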
4526static int
4527compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4528{
4529 struct de4x5_private *lp = netdev_priv(dev);
4530 u_char flags, csr6;
4531
4532
4533 if (--count > lp->tcount) {
4534 if (*(p+COMPACT_LEN) < 128) {
4535 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4536 } else {
4537 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4538 }
4539 }
4540
4541 if ((lp->media == INIT) && (lp->timeout < 0)) {
4542 lp->ibn = COMPACT;
4543 lp->active = 0;
4544 gep_wr(lp->cache.gepc, dev);
4545 lp->infoblock_media = (*p++) & COMPACT_MC;
4546 lp->cache.gep = *p++;
4547 csr6 = *p++;
4548 flags = *p++;
4549
4550 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4551 lp->defMedium = (flags & 0x40) ? -1 : 0;
4552 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4553 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4554 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4555 lp->useMII = false;
4556
4557 de4x5_switch_mac_port(dev);
4558 }
4559
4560 return dc21140m_autoconf(dev);
4561}
4562
4563
4564
4565
4566static int
4567type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4568{
4569 struct de4x5_private *lp = netdev_priv(dev);
4570 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4571
4572
4573 if (--count > lp->tcount) {
4574 if (*(p+len) < 128) {
4575 return dc_infoblock[COMPACT](dev, count, p+len);
4576 } else {
4577 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4578 }
4579 }
4580
4581 if ((lp->media == INIT) && (lp->timeout < 0)) {
4582 lp->ibn = 0;
4583 lp->active = 0;
4584 gep_wr(lp->cache.gepc, dev);
4585 p+=2;
4586 lp->infoblock_media = (*p++) & BLOCK0_MC;
4587 lp->cache.gep = *p++;
4588 csr6 = *p++;
4589 flags = *p++;
4590
4591 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4592 lp->defMedium = (flags & 0x40) ? -1 : 0;
4593 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4594 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4595 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4596 lp->useMII = false;
4597
4598 de4x5_switch_mac_port(dev);
4599 }
4600
4601 return dc21140m_autoconf(dev);
4602}
4603
4604
4605
4606static int
4607type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4608{
4609 struct de4x5_private *lp = netdev_priv(dev);
4610 u_char len = (*p & BLOCK_LEN)+1;
4611
4612
4613 if (--count > lp->tcount) {
4614 if (*(p+len) < 128) {
4615 return dc_infoblock[COMPACT](dev, count, p+len);
4616 } else {
4617 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4618 }
4619 }
4620
4621 p += 2;
4622 if (lp->state == INITIALISED) {
4623 lp->ibn = 1;
4624 lp->active = *p++;
4625 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4626 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4627 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4628 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4629 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4630 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4631 return 0;
4632 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4633 lp->ibn = 1;
4634 lp->active = *p;
4635 lp->infoblock_csr6 = OMR_MII_100;
4636 lp->useMII = true;
4637 lp->infoblock_media = ANS;
4638
4639 de4x5_switch_mac_port(dev);
4640 }
4641
4642 return dc21140m_autoconf(dev);
4643}
4644
4645static int
4646type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4647{
4648 struct de4x5_private *lp = netdev_priv(dev);
4649 u_char len = (*p & BLOCK_LEN)+1;
4650
4651
4652 if (--count > lp->tcount) {
4653 if (*(p+len) < 128) {
4654 return dc_infoblock[COMPACT](dev, count, p+len);
4655 } else {
4656 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4657 }
4658 }
4659
4660 if ((lp->media == INIT) && (lp->timeout < 0)) {
4661 lp->ibn = 2;
4662 lp->active = 0;
4663 p += 2;
4664 lp->infoblock_media = (*p) & MEDIA_CODE;
4665
4666 if ((*p++) & EXT_FIELD) {
4667 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4668 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4669 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4670 } else {
4671 lp->cache.csr13 = CSR13;
4672 lp->cache.csr14 = CSR14;
4673 lp->cache.csr15 = CSR15;
4674 }
4675 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4676 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4677 lp->infoblock_csr6 = OMR_SIA;
4678 lp->useMII = false;
4679
4680 de4x5_switch_mac_port(dev);
4681 }
4682
4683 return dc2114x_autoconf(dev);
4684}
4685
4686static int
4687type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4688{
4689 struct de4x5_private *lp = netdev_priv(dev);
4690 u_char len = (*p & BLOCK_LEN)+1;
4691
4692
4693 if (--count > lp->tcount) {
4694 if (*(p+len) < 128) {
4695 return dc_infoblock[COMPACT](dev, count, p+len);
4696 } else {
4697 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4698 }
4699 }
4700
4701 p += 2;
4702 if (lp->state == INITIALISED) {
4703 lp->ibn = 3;
4704 lp->active = *p++;
4705 if (MOTO_SROM_BUG) lp->active = 0;
4706 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4707 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4708 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4709 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4710 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4711 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4712 lp->phy[lp->active].mci = *p;
4713 return 0;
4714 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4715 lp->ibn = 3;
4716 lp->active = *p;
4717 if (MOTO_SROM_BUG) lp->active = 0;
4718 lp->infoblock_csr6 = OMR_MII_100;
4719 lp->useMII = true;
4720 lp->infoblock_media = ANS;
4721
4722 de4x5_switch_mac_port(dev);
4723 }
4724
4725 return dc2114x_autoconf(dev);
4726}
4727
4728static int
4729type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4730{
4731 struct de4x5_private *lp = netdev_priv(dev);
4732 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4733
4734
4735 if (--count > lp->tcount) {
4736 if (*(p+len) < 128) {
4737 return dc_infoblock[COMPACT](dev, count, p+len);
4738 } else {
4739 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4740 }
4741 }
4742
4743 if ((lp->media == INIT) && (lp->timeout < 0)) {
4744 lp->ibn = 4;
4745 lp->active = 0;
4746 p+=2;
4747 lp->infoblock_media = (*p++) & MEDIA_CODE;
4748 lp->cache.csr13 = CSR13;
4749 lp->cache.csr14 = CSR14;
4750 lp->cache.csr15 = CSR15;
4751 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4752 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4753 csr6 = *p++;
4754 flags = *p++;
4755
4756 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4757 lp->defMedium = (flags & 0x40) ? -1 : 0;
4758 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4759 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4760 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4761 lp->useMII = false;
4762
4763 de4x5_switch_mac_port(dev);
4764 }
4765
4766 return dc2114x_autoconf(dev);
4767}
4768
4769
4770
4771
4772
4773static int
4774type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4775{
4776 struct de4x5_private *lp = netdev_priv(dev);
4777 u_char len = (*p & BLOCK_LEN)+1;
4778
4779
4780 if (--count > lp->tcount) {
4781 if (*(p+len) < 128) {
4782 return dc_infoblock[COMPACT](dev, count, p+len);
4783 } else {
4784 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4785 }
4786 }
4787
4788
4789 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4790 p+=2;
4791 lp->rst = p;
4792 srom_exec(dev, lp->rst);
4793 }
4794
4795 return DE4X5_AUTOSENSE_MS;
4796}
4797
4798
4799
4800
4801
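/*
** MII management (MDIO) bit-banging. A read frame, as generated below, is
** 34 preamble bits, a 4 bit start/opcode field, the 5 bit PHY address, the
** 5 bit register address, a turnaround, then 16 data bits clocked in MSB
** first; writes substitute the write opcode and drive the data bits out.
*/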
4802static int
4803mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4804{
4805 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4806 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4807 mii_wdata(MII_STRD, 4, ioaddr);
4808 mii_address(phyaddr, ioaddr);
4809 mii_address(phyreg, ioaddr);
4810 mii_ta(MII_STRD, ioaddr);
4811
4812 return mii_rdata(ioaddr);
4813}
4814
4815static void
4816mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4817{
4818 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4819 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4820 mii_wdata(MII_STWR, 4, ioaddr);
4821 mii_address(phyaddr, ioaddr);
4822 mii_address(phyreg, ioaddr);
4823 mii_ta(MII_STWR, ioaddr);
4824 data = mii_swap(data, 16);
4825 mii_wdata(data, 16, ioaddr);
4826}
4827
4828static int
4829mii_rdata(u_long ioaddr)
4830{
4831 int i;
4832 s32 tmp = 0;
4833
4834 for (i=0; i<16; i++) {
4835 tmp <<= 1;
4836 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4837 }
4838
4839 return tmp;
4840}
4841
4842static void
4843mii_wdata(int data, int len, u_long ioaddr)
4844{
4845 int i;
4846
4847 for (i=0; i<len; i++) {
4848 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4849 data >>= 1;
4850 }
4851}
4852
4853static void
4854mii_address(u_char addr, u_long ioaddr)
4855{
4856 int i;
4857
4858 addr = mii_swap(addr, 5);
4859 for (i=0; i<5; i++) {
4860 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4861 addr >>= 1;
4862 }
4863}
4864
4865static void
4866mii_ta(u_long rw, u_long ioaddr)
4867{
4868 if (rw == MII_STWR) {
4869 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4870 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4871 } else {
4872 getfrom_mii(MII_MRD | MII_RD, ioaddr);
4873 }
4874}
4875
4876static int
4877mii_swap(int data, int len)
4878{
4879 int i, tmp = 0;
4880
4881 for (i=0; i<len; i++) {
4882 tmp <<= 1;
4883 tmp |= (data & 1);
4884 data >>= 1;
4885 }
4886
4887 return tmp;
4888}
4889
4890static void
4891sendto_mii(u32 command, int data, u_long ioaddr)
4892{
4893 u32 j;
4894
4895 j = (data & 1) << 17;
4896 outl(command | j, ioaddr);
4897 udelay(1);
4898 outl(command | MII_MDC | j, ioaddr);
4899 udelay(1);
4900}
4901
4902static int
4903getfrom_mii(u32 command, u_long ioaddr)
4904{
4905 outl(command, ioaddr);
4906 udelay(1);
4907 outl(command | MII_MDC, ioaddr);
4908 udelay(1);
4909
4910 return (inl(ioaddr) >> 19) & 1;
4911}
4912
4913
4914
4915
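/*
** mii_get_oui() - return the PHY identifier. Only the MII_ID0 register is
** actually used; the OUI reshuffling this routine was named for is not
** performed.
*/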
4916static int
4917mii_get_oui(u_char phyaddr, u_long ioaddr)
4918{
4925    int r2;
4926
4927
4928    r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4929    mii_rd(MII_ID1, phyaddr, ioaddr);   /* ID1 is read but its value is not used here */
4930
4957 return r2;
4958}
4959
4960
4961
4962
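/*
** mii_get_phy() - scan the MII bus for PHYs, match each identifier against
** the phy_info[] table (falling back to a generic entry when unknown),
** reset every PHY found and return how many were detected.
*/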
4963static int
4964mii_get_phy(struct net_device *dev)
4965{
4966 struct de4x5_private *lp = netdev_priv(dev);
4967 u_long iobase = dev->base_addr;
4968 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4969 int id;
4970
4971 lp->active = 0;
4972 lp->useMII = true;
4973
4974
4975 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4976 lp->phy[lp->active].addr = i;
4977 if (i==0) n++;
4978 while (de4x5_reset_phy(dev)<0) udelay(100);
4979 id = mii_get_oui(i, DE4X5_MII);
4980 if ((id == 0) || (id == 65535)) continue;
4981 for (j=0; j<limit; j++) {
4982 if (id != phy_info[j].id) continue;
4983 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4984 if (k < DE4X5_MAX_PHY) {
4985 memcpy((char *)&lp->phy[k],
4986 (char *)&phy_info[j], sizeof(struct phy_table));
4987 lp->phy[k].addr = i;
4988 lp->mii_cnt++;
4989 lp->active++;
4990 } else {
4991 goto purgatory;
4992 }
4993 break;
4994 }
4995 if ((j == limit) && (i < DE4X5_MAX_MII)) {
4996 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4997 lp->phy[k].addr = i;
4998 lp->phy[k].id = id;
4999 lp->phy[k].spd.reg = GENERIC_REG;
5000 lp->phy[k].spd.mask = GENERIC_MASK;
5001 lp->phy[k].spd.value = GENERIC_VALUE;
5002 lp->mii_cnt++;
5003 lp->active++;
5004 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5005 j = de4x5_debug;
5006 de4x5_debug |= DEBUG_MII;
5007 de4x5_dbg_mii(dev, k);
5008 de4x5_debug = j;
5009 printk("\n");
5010 }
5011 }
5012 purgatory:
5013 lp->active = 0;
5014 if (lp->phy[0].id) {
5015 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
5016 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5017 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5018
5019 de4x5_dbg_mii(dev, k);
5020 }
5021 }
5022 if (!lp->mii_cnt) lp->useMII = false;
5023
5024 return lp->mii_cnt;
5025}
5026
5027static char *
5028build_setup_frame(struct net_device *dev, int mode)
5029{
5030 struct de4x5_private *lp = netdev_priv(dev);
5031 int i;
5032 char *pa = lp->setup_frame;
5033
5034
5035 if (mode == ALL) {
5036 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5037 }
5038
5039 if (lp->setup_f == HASH_PERF) {
5040 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5041 *(pa + i) = dev->dev_addr[i];
5042 if (i & 0x01) pa += 2;
5043 }
5044 *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
5045 } else {
5046 for (i=0; i<ETH_ALEN; i++) {
5047 *(pa + (i&1)) = dev->dev_addr[i];
5048 if (i & 0x01) pa += 4;
5049 }
5050 for (i=0; i<ETH_ALEN; i++) {
5051 *(pa + (i&1)) = (char) 0xff;
5052 if (i & 0x01) pa += 4;
5053 }
5054 }
5055
5056 return pa;
5057}
5058
5059static void
5060disable_ast(struct net_device *dev)
5061{
5062 struct de4x5_private *lp = netdev_priv(dev);
5063 del_timer_sync(&lp->timer);
5064}
5065
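/*
** de4x5_switch_mac_port() - stop the chip, rewrite the operating mode
** register for the port/media selection taken from the info block, soft
** reset, and restore the general purpose port (DC21140) or SIA (21142/3)
** settings for the chosen medium.
*/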
5066static long
5067de4x5_switch_mac_port(struct net_device *dev)
5068{
5069 struct de4x5_private *lp = netdev_priv(dev);
5070 u_long iobase = dev->base_addr;
5071 s32 omr;
5072
5073 STOP_DE4X5;
5074
5075
5076 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5077 OMR_FDX));
5078 omr |= lp->infoblock_csr6;
5079 if (omr & OMR_PS) omr |= OMR_HBD;
5080 outl(omr, DE4X5_OMR);
5081
5082
5083 RESET_DE4X5;
5084
5085
5086 if (lp->chipset == DC21140) {
5087 gep_wr(lp->cache.gepc, dev);
5088 gep_wr(lp->cache.gep, dev);
5089 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5090 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5091 }
5092
5093
5094 outl(omr, DE4X5_OMR);
5095
5096
5097 inl(DE4X5_MFC);
5098
5099 return omr;
5100}
5101
5102static void
5103gep_wr(s32 data, struct net_device *dev)
5104{
5105 struct de4x5_private *lp = netdev_priv(dev);
5106 u_long iobase = dev->base_addr;
5107
5108 if (lp->chipset == DC21140) {
5109 outl(data, DE4X5_GEP);
5110 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5111 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5112 }
5113}
5114
5115static int
5116gep_rd(struct net_device *dev)
5117{
5118 struct de4x5_private *lp = netdev_priv(dev);
5119 u_long iobase = dev->base_addr;
5120
5121 if (lp->chipset == DC21140) {
5122 return inl(DE4X5_GEP);
5123 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5124 return inl(DE4X5_SIGR) & 0x000fffff;
5125 }
5126
5127 return 0;
5128}
5129
5130static void
5131yawn(struct net_device *dev, int state)
5132{
5133 struct de4x5_private *lp = netdev_priv(dev);
5134 u_long iobase = dev->base_addr;
5135
5136 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5137
5138 if(lp->bus == EISA) {
5139 switch(state) {
5140 case WAKEUP:
5141 outb(WAKEUP, PCI_CFPM);
5142 mdelay(10);
5143 break;
5144
5145 case SNOOZE:
5146 outb(SNOOZE, PCI_CFPM);
5147 break;
5148
5149 case SLEEP:
5150 outl(0, DE4X5_SICR);
5151 outb(SLEEP, PCI_CFPM);
5152 break;
5153 }
5154 } else {
5155 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5156 switch(state) {
5157 case WAKEUP:
5158 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5159 mdelay(10);
5160 break;
5161
5162 case SNOOZE:
5163 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5164 break;
5165
5166 case SLEEP:
5167 outl(0, DE4X5_SICR);
5168 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5169 break;
5170 }
5171 }
5172}
5173
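/*
** de4x5_parse_params() - pick this interface's options out of the 'args'
** module parameter. A plausible form, inferred from the parser below
** rather than from any documentation:
**
**     insmod de4x5 args='eth0:fdx autosense=AUTO eth1:autosense=BNC'
**
** Everything between this device's name and the next "eth" token is
** searched for "fdx"/"FDX" and, after an "autosense" keyword, for one of
** TP_NW, TP, BNC_AUI, BNC, AUI, 10Mb, 100Mb or AUTO (BNC_AUI is treated
** as BNC).
*/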
static void
de4x5_parse_params(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    char *p, *q, t;

    lp->params.fdx = false;
    lp->params.autosense = AUTO;

    if (args == NULL) return;

    if ((p = strstr(args, dev->name))) {
        if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
        t = *q;
        *q = '\0';

        if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;

        if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
            if (strstr(p, "TP_NW")) {
                lp->params.autosense = TP_NW;
            } else if (strstr(p, "TP")) {
                lp->params.autosense = TP;
            } else if (strstr(p, "BNC_AUI")) {
                lp->params.autosense = BNC;
            } else if (strstr(p, "BNC")) {
                lp->params.autosense = BNC;
            } else if (strstr(p, "AUI")) {
                lp->params.autosense = AUI;
            } else if (strstr(p, "10Mb")) {
                lp->params.autosense = _10Mb;
            } else if (strstr(p, "100Mb")) {
                lp->params.autosense = _100Mb;
            } else if (strstr(p, "AUTO")) {
                lp->params.autosense = AUTO;
            }
        }
        *q = t;
    }
}

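/* Dump the descriptor ring layout at open time when DEBUG_OPEN is set */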
static void
de4x5_dbg_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;

    if (de4x5_debug & DEBUG_OPEN) {
        printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
        printk("\tphysical address: %pM\n", dev->dev_addr);
        printk("Descriptor head addresses:\n");
        printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
        printk("Descriptor addresses:\nRX: ");
        for (i=0;i<lp->rxRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
        printk("TX: ");
        for (i=0;i<lp->txRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
            }
        }
        printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
        printk("Descriptor buffers:\nRX: ");
        for (i=0;i<lp->rxRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
        printk("TX: ");
        for (i=0;i<lp->txRingSize-1;i++){
            if (i < 3) {
                printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
            }
        }
        printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
        printk("Ring size:\nRX: %d\nTX: %d\n",
               (short)lp->rxRingSize,
               (short)lp->txRingSize);
    }
}

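/* Dump the MII management registers of PHY 'k' when DEBUG_MII is set */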
static void
de4x5_dbg_mii(struct net_device *dev, int k)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (de4x5_debug & DEBUG_MII) {
        printk("\nMII device address: %d\n", lp->phy[k].addr);
        printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
        printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
        printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
        printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
            printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
        }
        printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
        if (lp->phy[k].id != BROADCOM_T4) {
            printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
            printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
        } else {
            printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
        }
    }
}

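/* Track media changes and, when DEBUG_MEDIA is set, report them on the
** console.
*/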
static void
de4x5_dbg_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->media != lp->c_media) {
        if (de4x5_debug & DEBUG_MEDIA) {
            printk("%s: media is %s%s\n", dev->name,
                   (lp->media == NC ? "unconnected, link down or incompatible connection" :
                   (lp->media == TP ? "TP" :
                   (lp->media == ANS ? "TP/Nway" :
                   (lp->media == BNC ? "BNC" :
                   (lp->media == AUI ? "AUI" :
                   (lp->media == BNC_AUI ? "BNC/AUI" :
                   (lp->media == EXT_SIA ? "EXT SIA" :
                   (lp->media == _100Mb ? "100Mb/s" :
                   (lp->media == _10Mb ? "10Mb/s" :
                   "???"
                   ))))))))), (lp->fdx?" full duplex.":"."));
        }
        lp->c_media = lp->media;
    }
}

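/* Dump the SROM contents when DEBUG_SROM is set */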
static void
de4x5_dbg_srom(struct de4x5_srom *p)
{
    int i;

    if (de4x5_debug & DEBUG_SROM) {
        printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
        printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
        printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
        printk("SROM version: %02x\n", (u_char)(p->version));
        printk("# controllers: %02x\n", (u_char)(p->num_controllers));

        printk("Hardware Address: %pM\n", p->ieee_addr);
        printk("CRC checksum: %04x\n", (u_short)(p->chksum));
        for (i=0; i<64; i++) {
            printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
        }
    }
}

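/* Hex dump a received frame when DEBUG_RX is set */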
static void
de4x5_dbg_rx(struct sk_buff *skb, int len)
{
    int i, j;

    if (de4x5_debug & DEBUG_RX) {
        printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
               skb->data, &skb->data[6],
               (u_char)skb->data[12],
               (u_char)skb->data[13],
               len);
        for (j=0; len>0;j+=16, len-=16) {
            printk(" %03x: ",j);
            for (i=0; i<16 && i<len; i++) {
                printk("%02x ",(u_char)skb->data[i+j]);
            }
            printk("\n");
        }
    }
}

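/*
** Private ioctl interface. Commands that alter hardware or driver state
** check CAP_NET_ADMIN before acting; unknown commands return -EOPNOTSUPP.
*/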
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
        u8  addr[144];
        u16 sval[72];
        u32 lval[36];
    } tmp;
    u_long flags = 0;

    switch(ioc->cmd) {
    case DE4X5_GET_HWADDR:           /* Get the hardware address */
        ioc->len = ETH_ALEN;
        for (i=0; i<ETH_ALEN; i++) {
            tmp.addr[i] = dev->dev_addr[i];
        }
        if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;

    case DE4X5_SET_HWADDR:           /* Set the hardware address */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
        if (netif_queue_stopped(dev))
            return -EBUSY;
        netif_stop_queue(dev);
        for (i=0; i<ETH_ALEN; i++) {
            dev->dev_addr[i] = tmp.addr[i];
        }
        build_setup_frame(dev, PHYS_ADDR_ONLY);
        /* Set up the setup-frame descriptor and give ownership to the chip */
        load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
                    SETUP_FRAME_LEN, (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);                /* Start the TX */
        netif_wake_queue(dev);                       /* Unlock the TX ring */
        break;

    case DE4X5_SAY_BOO:              /* Say "Boo!" to the kernel log */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        printk("%s: Boo!\n", dev->name);
        break;

    case DE4X5_MCA_EN:               /* Enable pass-all-multicast (OMR_PM) */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        omr = inl(DE4X5_OMR);
        omr |= OMR_PM;
        outl(omr, DE4X5_OMR);
        break;

    case DE4X5_GET_STATS:            /* Copy the driver packet statistics */
    {
        struct pkt_stats statbuf;
        ioc->len = sizeof(statbuf);
        spin_lock_irqsave(&lp->lock, flags);
        memcpy(&statbuf, &lp->pktStats, ioc->len);
        spin_unlock_irqrestore(&lp->lock, flags);
        if (copy_to_user(ioc->data, &statbuf, ioc->len))
            return -EFAULT;
        break;
    }
    case DE4X5_CLR_STATS:            /* Zero the driver packet statistics */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        spin_lock_irqsave(&lp->lock, flags);
        memset(&lp->pktStats, 0, sizeof(lp->pktStats));
        spin_unlock_irqrestore(&lp->lock, flags);
        break;

    case DE4X5_GET_OMR:              /* Get the OMR (CSR6) contents */
        tmp.addr[0] = inl(DE4X5_OMR);
        if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
        break;

    case DE4X5_SET_OMR:              /* Set the OMR (CSR6) contents */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
        outl(tmp.addr[0], DE4X5_OMR);
        break;

    case DE4X5_GET_REG:              /* Get the DE4X5 CSRs */
        j = 0;
        tmp.lval[0] = inl(DE4X5_STS);  j+=4;
        tmp.lval[1] = inl(DE4X5_BMR);  j+=4;
        tmp.lval[2] = inl(DE4X5_IMR);  j+=4;
        tmp.lval[3] = inl(DE4X5_OMR);  j+=4;
        tmp.lval[4] = inl(DE4X5_SISR); j+=4;
        tmp.lval[5] = inl(DE4X5_SICR); j+=4;
        tmp.lval[6] = inl(DE4X5_STRR); j+=4;
        tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
        ioc->len = j;
        if (copy_to_user(ioc->data, tmp.lval, ioc->len))
            return -EFAULT;
        break;

#define DE4X5_DUMP 0x0f

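    /* Note: DE4X5_DUMP is defined above but has no handler here, so it
    ** falls through to the default case like any other unknown command.
    */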
    default:
        return -EOPNOTSUPP;
    }

    return status;
}

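/*
** Module glue: register the PCI and/or EISA drivers according to the
** kernel configuration, and unregister them again on module unload.
*/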
static int __init de4x5_module_init (void)
{
    int err = 0;

#ifdef CONFIG_PCI
    err = pci_register_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    err |= eisa_driver_register (&de4x5_eisa_driver);
#endif

    return err;
}

static void __exit de4x5_module_exit (void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver (&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    eisa_driver_unregister (&de4x5_eisa_driver);
#endif
}

module_init (de4x5_module_init);
module_exit (de4x5_module_exit);