1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446#include <linux/module.h>
447#include <linux/kernel.h>
448#include <linux/string.h>
449#include <linux/interrupt.h>
450#include <linux/ptrace.h>
451#include <linux/errno.h>
452#include <linux/ioport.h>
453#include <linux/pci.h>
454#include <linux/eisa.h>
455#include <linux/delay.h>
456#include <linux/init.h>
457#include <linux/spinlock.h>
458#include <linux/crc32.h>
459#include <linux/netdevice.h>
460#include <linux/etherdevice.h>
461#include <linux/skbuff.h>
462#include <linux/time.h>
463#include <linux/types.h>
464#include <linux/unistd.h>
465#include <linux/ctype.h>
466#include <linux/dma-mapping.h>
467#include <linux/moduleparam.h>
468#include <linux/bitops.h>
469#include <linux/gfp.h>
470
471#include <asm/io.h>
472#include <asm/dma.h>
473#include <asm/byteorder.h>
474#include <asm/unaligned.h>
475#include <linux/uaccess.h>
476#ifdef CONFIG_PPC_PMAC
477#include <asm/machdep.h>
478#endif
479
480#include "de4x5.h"
481
482static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485#define c_char const char
486
487
488
489
/*
** Static description of a known MII PHY type: reset requirement, its
** OUI-derived ID, MII turn-around behaviour, and the register/mask/value
** triple used to detect 100Mb/s operation (see phy_info[] below).
*/
struct phy_table {
    int reset;          /* Hard reset required before use? */
    int id;             /* IEEE OUI (PHY identifier) */
    int ta;             /* One cycle of turn-around required? */
    struct {            /* Speed detection triple: (reg & mask) == value => 100Mb */
	int reg;
	int mask;
	int value;
    } spd;
};
500
/*
** Per-attached-PHY runtime state.  The first four members mirror
** struct phy_table so a phy_info[] entry can be copied straight in;
** the rest is filled from the MII bus / SROM during probing.
*/
struct mii_phy {
    int reset;          /* Hard reset required? */
    int id;             /* IEEE OUI */
    int ta;             /* One cycle of turn-around required? */
    struct {            /* Speed detection triple, as in struct phy_table */
	int reg;
	int mask;
	int value;
    } spd;
    int addr;           /* MII address of this PHY */
    u_char *gep;        /* Pointer into SROM GEP sequence -- presumably; see srom_exec() */
    u_char *rst;        /* Pointer into SROM reset sequence */
    u_int mc;           /* Media capabilities word */
    u_int ana;          /* Auto-negotiation advertisement */
    u_int fdx;          /* Full duplex capability bits */
    u_int ttm;          /* Transmit threshold mode bits */
    u_int mci;          /* MII connector/interrupt info (21142) -- TODO confirm */
};
519
520#define DE4X5_MAX_PHY 8
521
/*
** SIA (on-chip serial interface adapter) media state: the CSR13/14/15
** programming values plus GEP control/data (see reset_init_sia()).
*/
struct sia_phy {
    u_char mc;          /* Media code */
    u_char ext;         /* NOTE(review): presumably "csr13..15 below are valid" flag */
    int csr13;          /* SIA connectivity register value */
    int csr14;          /* SIA TX/RX register value */
    int csr15;          /* SIA general register value */
    int gepc;           /* GEP control information */
    int gep;            /* GEP data */
};
531
532
533
534
535
/*
** The known universe of PHY devices this driver recognises.  Each entry:
** {reset-needed, OUI id, turn-around, {speed reg, mask, value}}.
*/
static struct phy_table phy_info[] = {
    {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
    {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
    {0, SEEQ_T4    , 1, {0x12, 0x10, 0x10}},
    {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}},
    {0, 0x7810     , 1, {0x14, 0x0800, 0x0800}}
};
543
544
545
546
547
548
549#define GENERIC_REG 0x05
550#define GENERIC_MASK MII_ANLPA_100M
551#define GENERIC_VALUE MII_ANLPA_100M
552
553
554
555
/*
** Vendor OUI prefixes (first three octets of the Ethernet address) used
** to spot cards with known-broken SROMs; indexed via the SMC/ACCTON
** constants defined just below and consumed by test_bad_enet()/srom_repair().
*/
static c_char enet_det[][ETH_ALEN] = {
    {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
    {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
};
560
561#define SMC 1
562#define ACCTON 2
563
564
565
566
567
568
/*
** Replacement SROM info block used by srom_repair() for cards whose
** on-board SROM is known to be broken (see enet_det[] above).
** NOTE(review): only one entry present; presumably the SMC variant.
*/
static c_char srom_repair_info[][100] = {
    {0x00,0x1e,0x00,0x00,0x00,0x08,             /* SMC9332 */
     0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
     0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
     0x00,0x18,}
};
575
576
577#ifdef DE4X5_DEBUG
578static int de4x5_debug = DE4X5_DEBUG;
579#else
580
581static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582#endif
583
584
585
586
587
588
589
590
591
592
593#ifdef DE4X5_PARM
594static char *args = DE4X5_PARM;
595#else
596static char *args;
597#endif
598
/* User-supplied settings parsed from the `args' module parameter /
** DE4X5_PARM by de4x5_parse_params(): forced full duplex and the
** media autosense selection. */
struct parameters {
    bool fdx;           /* Force full duplex operation */
    int autosense;      /* Requested autosense media (TP, BNC, AUI, ...) */
};
603
604#define DE4X5_AUTOSENSE_MS 250
605
606#define DE4X5_NDA 0xffe0
607
608
609
610
611#define PROBE_LENGTH 32
612#define ETH_PROM_SIG 0xAA5500FFUL
613
614
615
616
617#define PKT_BUF_SZ 1536
618#define IEEE802_3_SZ 1518
619#define MAX_PKT_SZ 1514
620#define MAX_DAT_SZ 1500
621#define MIN_DAT_SZ 1
622#define PKT_HDR_LEN 14
623#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624#define QUEUE_PKT_TIMEOUT (3*HZ)
625
626
627
628
629
630#define DE4X5_EISA_IO_PORTS 0x0c00
631#define DE4X5_EISA_TOTAL_SIZE 0x100
632
633#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636#define DE4X5_NAME_LENGTH 8
637
638static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
639
640
641
642
643#define PROBE_LENGTH 32
644#define ETH_PROM_SIG 0xAA5500FFUL
645
646
647
648
649#define PCI_MAX_BUS_NUM 8
650#define DE4X5_PCI_TOTAL_SIZE 0x80
651#define DE4X5_CLASS_CODE 0x00020000
652
653
654
655
656
657
658
659#define DE4X5_ALIGN4 ((u_long)4 - 1)
660#define DE4X5_ALIGN8 ((u_long)8 - 1)
661#define DE4X5_ALIGN16 ((u_long)16 - 1)
662#define DE4X5_ALIGN32 ((u_long)32 - 1)
663#define DE4X5_ALIGN64 ((u_long)64 - 1)
664#define DE4X5_ALIGN128 ((u_long)128 - 1)
665
666#define DE4X5_ALIGN DE4X5_ALIGN32
667#define DE4X5_CACHE_ALIGN CAL_16LONG
668#define DESC_SKIP_LEN DSL_0
669
670#define DESC_ALIGN
671
672#ifndef DEC_ONLY
673static int dec_only;
674#else
675static int dec_only = 1;
676#endif
677
678
679
680
/*
** Interrupt Mask Register helpers.  All four expect `imr' (s32), `lp'
** (driver private) and `iobase' to be in scope at the call site.
** ENABLE/UNMASK write the caller's cached `imr'; DISABLE/MASK re-read
** the live register first before clearing bits.
*/
#define ENABLE_IRQs { \
    imr |= lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define DISABLE_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define UNMASK_IRQs {\
    imr |= lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define MASK_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

/*
** Start/stop the chip's transmit and receive processes via the OMR
** ST/SR bits; expect `omr' (s32) and `iobase' in scope at the call site.
*/
#define START_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr |= OMR_ST | OMR_SR;\
    outl(omr, DE4X5_OMR); \
}

#define STOP_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr &= ~(OMR_ST|OMR_SR);\
    outl(omr, DE4X5_OMR); \
}

/* Hold the SIA in reset by zeroing the SIA connectivity register. */
#define RESET_SIA outl(0, DE4X5_SICR);

/* Autosense timer interval (millisecs).
** NOTE(review): duplicate of the identical definition near the top of
** the file -- harmless (same value) but one copy could be removed. */
#define DE4X5_AUTOSENSE_MS 250
727
728
729
730
/*
** In-memory image of the card's serial ROM (SROM), as read by
** DevicePresent()/srom_rd().  Layout follows the DEC SROM format:
** vendor IDs, the IEEE station address, a media info area and a checksum.
*/
struct de4x5_srom {
    char sub_vendor_id[2];
    char sub_system_id[2];
    char reserved[12];
    char id_block_crc;
    char reserved2;
    char version;
    char num_controllers;
    char ieee_addr[6];          /* Station (MAC) address */
    char info[100];             /* Media/infoleaf data parsed by srom_init() */
    short chksum;
};
743#define SUB_VENDOR_ID 0x500a
744
745
746
747
748
749
750
751
752
753#define NUM_RX_DESC 8
754#define NUM_TX_DESC 32
755#define RX_BUFF_SZ 1536
756
757
/*
** One 21x4x DMA descriptor as seen by the chip: all fields little-endian.
** `status' is volatile because the hardware writes ownership/completion
** bits into it asynchronously.  DESC_ALIGN is empty by default (no pad).
*/
struct de4x5_desc {
    volatile __le32 status;     /* Ownership + completion/error bits */
    __le32 des1;                /* Control bits and buffer sizes */
    __le32 buf;                 /* DMA address of data buffer */
    __le32 next;                /* DMA address of next descriptor (chained mode) */
    DESC_ALIGN
};
765
766
767
768
769#define DE4X5_PKT_STAT_SZ 16
770#define DE4X5_PKT_BIN_SZ 128
771
772
/* Driver-private packet statistics kept alongside the standard
** net_device_stats; updated in the RX/TX completion paths. */
struct pkt_stats {
    u_int bins[DE4X5_PKT_STAT_SZ];      /* Packet-size histogram (DE4X5_PKT_BIN_SZ wide bins) */
    u_int unicast;
    u_int multicast;
    u_int broadcast;
    u_int excessive_collisions;
    u_int tx_underruns;
    u_int excessive_underruns;
    u_int rx_runt_frames;
    u_int rx_collision;
    u_int rx_dribble;
    u_int rx_overflow;
};
786
/*
** Per-adapter private state (netdev_priv).  Groups: descriptor rings and
** their skbs, media/autosense state machine variables, attached PHY info,
** a register/skb cache used across sleep and media switches, the SROM
** image with its parsed infoblock pointers, and the DMA bookkeeping.
*/
struct de4x5_private {
    char adapter_name[80];              /* Name shown in IRQ registration */
    u_long interrupt;                   /* ISR re-entry flag (bit 0) */
    struct de4x5_desc *rx_ring;         /* RX descriptor ring (coherent DMA) */
    struct de4x5_desc *tx_ring;         /* TX descriptor ring (follows RX ring) */
    struct sk_buff *tx_skb[NUM_TX_DESC];/* skb per TX slot, freed on completion */
    struct sk_buff *rx_skb[NUM_RX_DESC];/* skb per RX slot ((void*)1 = dummy) */
    int rx_new, rx_old;                 /* RX ring head/tail indices */
    int tx_new, tx_old;                 /* TX ring head/tail indices */
    char setup_frame[SETUP_FRAME_LEN];  /* Setup frame (address filter) buffer */
    char frame[64];                     /* Min-sized packet for media loopback tests */
    spinlock_t lock;                    /* Protects ring state */
    struct net_device_stats stats;      /* Standard interface statistics */
    struct pkt_stats pktStats;          /* Driver-private statistics */
    char rxRingSize;
    char txRingSize;
    int bus;                            /* EISA or PCI */
    int bus_num;                        /* PCI bus number */
    int device;                         /* PCI device number */
    int state;                          /* INITIALISED / CLOSED / OPEN */
    int chipset;                        /* DC21040/21041/21140/2114x */
    s32 irq_mask;                       /* Individual IMR interrupt bits */
    s32 irq_en;                         /* IMR summary (normal/abnormal) bits */
    int media;                          /* Current media state-machine state */
    int c_media;                        /* Last reported media (for dbg_media) */
    bool fdx;                           /* Full duplex active */
    int linkOK;                         /* Incremented on good TX/RX => link up */
    int autosense;                      /* Media autosense selection */
    bool tx_enable;                     /* Transmit path enabled */
    int setup_f;                        /* Filtering mode: PERFECT or HASH */
    int local_state;                    /* Sub-state within a media state */
    struct mii_phy phy[DE4X5_MAX_PHY];  /* Attached PHY devices */
    struct sia_phy sia;                 /* SIA programming info */
    int active;                         /* Index of active PHY in phy[] */
    int mii_cnt;                        /* Number of PHYs found */
    int timeout;                        /* Autosense scheduling counter */
    struct timer_list timer;            /* Autosense timer (de4x5_ast) */
    int tmp;                            /* Scratch, per card */
    struct {                            /* State cached across sleep/media switch */
	u_long lock;                    /* Bit 0: cache/queue access lock */
	s32 csr0;                       /* Saved bus mode register */
	s32 csr6;                       /* Saved operating mode register */
	s32 csr7;                       /* Saved interrupt mask register */
	s32 gep;                        /* Saved GEP data */
	s32 gepc;                       /* Saved GEP control */
	s32 csr13;                      /* Saved SIA connectivity register */
	s32 csr14;                      /* Saved SIA TX/RX register */
	s32 csr15;                      /* Saved SIA general register */
	int save_cnt;                   /* Nesting count for save/restore */
	struct sk_buff_head queue;      /* Deferred TX skbs */
    } cache;
    struct de4x5_srom srom;             /* Copy of the card's SROM */
    int cfrv;                           /* PCI revision (CFRV) copy */
    int rx_ovf;                         /* Chip needs RX-overflow workaround */
    bool useSROM;                       /* Use SROM media info (non-DEC cards) */
    bool useMII;                        /* Current infoblock uses the MII */
    int asBitValid;                     /* Autosense bit in GEP is valid */
    int asPolarity;                     /* Autosense bit polarity */
    int asBit;                          /* Autosense bit mask in GEP */
    int defMedium;                      /* SROM default medium */
    int tcount;                         /* Last infoblock number tried */
    int infoblock_init;                 /* Infoblock initialised flag */
    int infoleaf_offset;                /* Offset of this controller's infoleaf */
    s32 infoblock_csr6;                 /* OMR (csr6) value from infoblock */
    int infoblock_media;                /* Media from infoblock */
    int (*infoleaf_fn)(struct net_device *);   /* Chipset-specific infoleaf decoder */
    u_char *rst;                        /* Type 5 reset sequence pointer */
    u_char ibn;                         /* Infoblock number */
    struct parameters params;           /* Parsed command line parameters */
    struct device *gendev;              /* Generic device (DMA API handle) */
    dma_addr_t dma_rings;               /* DMA address of the ring area */
    int dma_size;                       /* Size of the coherent DMA area */
    char *rx_bufs;                      /* RX bounce buffers (alpha/ppc/sparc/memcpy builds) */
};
861
862
863
864
865
866
867
868
869
870
871
872
873
/*
** Remembers the most recently probed adapter (chipset, bus, IRQ and MAC
** address).  NOTE(review): presumably used during probing to derive
** addresses for multi-port boards whose extra chips lack an SROM --
** confirm against the probe/get_hw_addr code.
*/
static struct {
    int chipset;
    int bus;
    int irq;
    u_char addr[ETH_ALEN];
} last = {0,};
880
881
882
883
884
885
886
887
/*
** Number of free TX descriptors.  The ring is circular and one slot is
** always left empty so that tx_old == tx_new unambiguously means "empty"
** (hence the -1 in both arms).
*/
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+lp->txRingSize-lp->tx_new-1:\
			lp->tx_old -lp->tx_new-1)

/* True while any transmitted descriptor is still awaiting completion. */
#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
893
894
895
896
897static int de4x5_open(struct net_device *dev);
898static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901static int de4x5_close(struct net_device *dev);
902static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904static void set_multicast_list(struct net_device *dev);
905static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907
908
909
910static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911static int de4x5_init(struct net_device *dev);
912static int de4x5_sw_reset(struct net_device *dev);
913static int de4x5_rx(struct net_device *dev);
914static int de4x5_tx(struct net_device *dev);
915static void de4x5_ast(struct timer_list *t);
916static int de4x5_txur(struct net_device *dev);
917static int de4x5_rx_ovfc(struct net_device *dev);
918
919static int autoconf_media(struct net_device *dev);
920static void create_packet(struct net_device *dev, char *frame, int len);
921static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922static int dc21040_autoconf(struct net_device *dev);
923static int dc21041_autoconf(struct net_device *dev);
924static int dc21140m_autoconf(struct net_device *dev);
925static int dc2114x_autoconf(struct net_device *dev);
926static int srom_autoconf(struct net_device *dev);
927static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930static int test_for_100Mb(struct net_device *dev, int msec);
931static int wait_for_link(struct net_device *dev);
932static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933static int is_spd_100(struct net_device *dev);
934static int is_100_up(struct net_device *dev);
935static int is_10_up(struct net_device *dev);
936static int is_anc_capable(struct net_device *dev);
937static int ping_media(struct net_device *dev, int msec);
938static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939static void de4x5_free_rx_buffs(struct net_device *dev);
940static void de4x5_free_tx_buffs(struct net_device *dev);
941static void de4x5_save_skbs(struct net_device *dev);
942static void de4x5_rst_desc_ring(struct net_device *dev);
943static void de4x5_cache_state(struct net_device *dev, int flag);
944static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947static void de4x5_setup_intr(struct net_device *dev);
948static void de4x5_init_connection(struct net_device *dev);
949static int de4x5_reset_phy(struct net_device *dev);
950static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952static int test_tp(struct net_device *dev, s32 msec);
953static int EISA_signature(char *name, struct device *device);
954static void PCI_signature(char *name, struct de4x5_private *lp);
955static void DevicePresent(struct net_device *dev, u_long iobase);
956static void enet_addr_rst(u_long aprom_addr);
957static int de4x5_bad_srom(struct de4x5_private *lp);
958static short srom_rd(u_long address, u_char offset);
959static void srom_latch(u_int command, u_long address);
960static void srom_command(u_int command, u_long address);
961static void srom_address(u_int command, u_long address, u_char offset);
962static short srom_data(u_int command, u_long address);
963
964static void sendto_srom(u_int command, u_long addr);
965static int getfrom_srom(u_long addr);
966static int srom_map_media(struct net_device *dev);
967static int srom_infoleaf_info(struct net_device *dev);
968static void srom_init(struct net_device *dev);
969static void srom_exec(struct net_device *dev, u_char *p);
970static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972static int mii_rdata(u_long ioaddr);
973static void mii_wdata(int data, int len, u_long ioaddr);
974static void mii_ta(u_long rw, u_long ioaddr);
975static int mii_swap(int data, int len);
976static void mii_address(u_char addr, u_long ioaddr);
977static void sendto_mii(u32 command, int data, u_long ioaddr);
978static int getfrom_mii(u32 command, u_long ioaddr);
979static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980static int mii_get_phy(struct net_device *dev);
981static void SetMulticastFilter(struct net_device *dev);
982static int get_hw_addr(struct net_device *dev);
983static void srom_repair(struct net_device *dev, int card);
984static int test_bad_enet(struct net_device *dev, int status);
985static int an_exception(struct de4x5_private *lp);
986static char *build_setup_frame(struct net_device *dev, int mode);
987static void disable_ast(struct net_device *dev);
988static long de4x5_switch_mac_port(struct net_device *dev);
989static int gep_rd(struct net_device *dev);
990static void gep_wr(s32 data, struct net_device *dev);
991static void yawn(struct net_device *dev, int state);
992static void de4x5_parse_params(struct net_device *dev);
993static void de4x5_dbg_open(struct net_device *dev);
994static void de4x5_dbg_mii(struct net_device *dev, int k);
995static void de4x5_dbg_media(struct net_device *dev);
996static void de4x5_dbg_srom(struct de4x5_srom *p);
997static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998static int dc21041_infoleaf(struct net_device *dev);
999static int dc21140_infoleaf(struct net_device *dev);
1000static int dc21142_infoleaf(struct net_device *dev);
1001static int dc21143_infoleaf(struct net_device *dev);
1002static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1003static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1004static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1005static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1006static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1007static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1008static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1009
1010
1011
1012
1013
1014
1015
/* Module parameters: I/O base (0 = probe), debug mask, DEC-only probing,
** and the media/duplex argument string parsed by de4x5_parse_params(). */
static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED        */

module_param_hw(io, int, ioport, 0);
module_param(de4x5_debug, int, 0);
module_param(dec_only, int, 0);
module_param(args, charp, 0);

MODULE_PARM_DESC(io, "de4x5 I/O base address");
MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
MODULE_LICENSE("GPL");
1028
1029
1030
1031
/*
** Dispatch table mapping a chipset to its SROM infoleaf decoder; searched
** by srom_infoleaf_info() to set lp->infoleaf_fn.
*/
struct InfoLeaf {
    int chipset;                        /* Chipset ID (DC21041, ...) */
    int (*fn)(struct net_device *);     /* Matching infoleaf decode function */
};
static struct InfoLeaf infoleaf_array[] = {
    {DC21041, dc21041_infoleaf},
    {DC21140, dc21140_infoleaf},
    {DC21142, dc21142_infoleaf},
    {DC21143, dc21143_infoleaf}
};
#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1043
1044
1045
1046
/*
** SROM infoblock decoders indexed by infoblock type (0-5); the compact
** format decoder is the last entry, addressed via COMPACT.
*/
static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
    type0_infoblock,
    type1_infoblock,
    type2_infoblock,
    type3_infoblock,
    type4_infoblock,
    type5_infoblock,
    compact_infoblock
};

#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1058
1059
1060
1061
/*
** Software-reset the 21x4x: pulse BMR_SWR in the bus mode register, then
** restore the original BMR value and read the register back a few times
** with delays to let the reset settle.  Expects `iobase' in scope.
*/
#define RESET_DE4X5 {\
    int i;\
    i=inl(DE4X5_BMR);\
    mdelay(1);\
    outl(i | BMR_SWR, DE4X5_BMR);\
    mdelay(1);\
    outl(i, DE4X5_BMR);\
    mdelay(1);\
    for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
    mdelay(1);\
}

/*
** Hard-reset the external PHY by pulsing the GEP hard-reset line.
** Expects `iobase' in scope.
*/
#define PHY_HARD_RESET {\
    outl(GEP_HRST, DE4X5_GEP); \
    mdelay(1); \
    outl(0x00, DE4X5_GEP);\
    mdelay(2); \
}
1080
/* Standard net_device entry points for this driver. */
static const struct net_device_ops de4x5_netdev_ops = {
    .ndo_open		= de4x5_open,
    .ndo_stop		= de4x5_close,
    .ndo_start_xmit	= de4x5_queue_pkt,
    .ndo_get_stats	= de4x5_get_stats,
    .ndo_set_rx_mode	= set_multicast_list,
    .ndo_do_ioctl	= de4x5_ioctl,
    .ndo_set_mac_address= eth_mac_addr,
    .ndo_validate_addr	= eth_validate_addr,
};
1091
1092
/*
** Probe-time initialisation of one adapter: wake the chip, software-reset
** it, identify the board, read the station address, allocate and lay out
** the coherent DMA area holding both descriptor rings (plus RX bounce
** buffers on non-coherent-DMA architectures), parse the SROM infoleaf for
** non-DEC cards, find attached MII PHYs, and finally register the netdev
** and put the chip back to sleep.  Returns 0 or a negative errno; on
** failure after the DMA allocation the area is freed before returning.
*/
static int
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
    char name[DE4X5_NAME_LENGTH + 1];
    struct de4x5_private *lp = netdev_priv(dev);
    struct pci_dev *pdev = NULL;
    int i, status=0;

    dev_set_drvdata(gendev, dev);

    /* Wake the chip up in case it is in a power-saving state. */
    if (lp->bus == EISA) {
	outb(WAKEUP, PCI_CFPM);
    } else {
	pdev = to_pci_dev (gendev);
	pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
    }
    mdelay(10);

    RESET_DE4X5;

    /* TX/RX processes must be stopped after a reset; otherwise bail. */
    if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
	return -ENXIO;                          /* Hardware could not reset */
    }

    /* Identify the board; these also decide whether the SROM is used. */
    lp->useSROM = false;
    if (lp->bus == PCI) {
	PCI_signature(name, lp);
    } else {
	EISA_signature(name, gendev);
    }

    if (*name == '\0') {                        /* No recognised signature */
	return -ENXIO;
    }

    dev->base_addr = iobase;
    printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);

    status = get_hw_addr(dev);
    printk(", h/w address %pM\n", dev->dev_addr);

    if (status != 0) {
	printk(" which has an Ethernet PROM CRC error.\n");
	return -ENXIO;
    } else {
	skb_queue_head_init(&lp->cache.queue);
	lp->cache.gepc = GEP_INIT;
	lp->asBit = GEP_SLNK;
	lp->asPolarity = GEP_SLNK;
	lp->asBitValid = ~0;
	lp->timeout = -1;
	lp->gendev = gendev;
	spin_lock_init(&lp->lock);
	timer_setup(&lp->timer, de4x5_ast, 0);
	de4x5_parse_params(dev);

	/*
	** Sanitise the requested autosense mode: older chips do not
	** support all media types (no NWay on 21040, no AUI split on 21041).
	*/
	lp->autosense = lp->params.autosense;
	if (lp->chipset != DC21140) {
	    if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
		lp->params.autosense = TP;
	    }
	    if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
		lp->params.autosense = BNC;
	    }
	}
	lp->fdx = lp->params.fdx;
	sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));

	/* One coherent area for both rings (+ aligned RX bounce buffers
	** on architectures that cannot map skb data directly). */
	lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
	lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
#endif
	lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
					 &lp->dma_rings, GFP_ATOMIC);
	if (lp->rx_ring == NULL) {
	    return -ENOMEM;
	}

	lp->tx_ring = lp->rx_ring + NUM_RX_DESC;

	/*
	** Set up the RX descriptor ring.  rx_skb[i] = (sk_buff *)1 marks a
	** dummy entry: a real buffer is attached in de4x5_open().
	*/
#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
	for (i=0; i<NUM_RX_DESC; i++) {
	    lp->rx_ring[i].status = 0;
	    lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
	    lp->rx_ring[i].buf = 0;
	    lp->rx_ring[i].next = 0;
	    lp->rx_skb[i] = (struct sk_buff *) 1;       /* Dummy entry */
	}

#else
	{
	    /* Bounce-buffer variant: point each descriptor at an aligned
	    ** slice of the DMA area that follows the two rings. */
	    dma_addr_t dma_rx_bufs;

	    dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
		* sizeof(struct de4x5_desc);
	    dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
	    lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
		+ NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
	    for (i=0; i<NUM_RX_DESC; i++) {
		lp->rx_ring[i].status = 0;
		lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
		lp->rx_ring[i].buf =
		    cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
		lp->rx_ring[i].next = 0;
		lp->rx_skb[i] = (struct sk_buff *) 1;   /* Dummy entry */
	    }

	}
#endif

	barrier();

	lp->rxRingSize = NUM_RX_DESC;
	lp->txRingSize = NUM_TX_DESC;

	/* Write the end-of-ring markers into the last descriptors. */
	lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
	lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);

	/* Tell the adapter where the rings live. */
	outl(lp->dma_rings, DE4X5_RRBA);
	outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
	     DE4X5_TRBA);

	/* Individual interrupt bits and the summary enable bits. */
	lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
	lp->irq_en = IMR_NIM | IMR_AIM;

	/* Build the loopback frame used by the media probing code. */
	create_packet(dev, lp->frame, sizeof(lp->frame));

	/* Early DC21140 steppings need the RX-overflow workaround. */
	i = lp->cfrv & 0x000000fe;
	if ((lp->chipset == DC21140) && (i == 0x20)) {
	    lp->rx_ovf = 1;
	}

	/* Parse the SROM media information for non-DEC cards. */
	if (lp->useSROM) {
	    lp->state = INITIALISED;
	    if (srom_infoleaf_info(dev)) {
		dma_free_coherent (gendev, lp->dma_size,
			       lp->rx_ring, lp->dma_rings);
		return -ENXIO;
	    }
	    srom_init(dev);
	}

	lp->state = CLOSED;

	/* Chips newer than the 21040/21041 may have MII PHYs attached. */
	if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
	    mii_get_phy(dev);
	}

	printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
	       ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
    }

    if (de4x5_debug & DEBUG_VERSION) {
	printk(version);
    }

    /* Fill in the generic device fields and register the interface. */
    SET_NETDEV_DEV(dev, gendev);
    dev->netdev_ops = &de4x5_netdev_ops;
    dev->mem_start = 0;

    if ((status = register_netdev (dev))) {
	dma_free_coherent (gendev, lp->dma_size,
			   lp->rx_ring, lp->dma_rings);
	return status;
    }

    /* Let the adapter sleep to save power until it is opened. */
    yawn(dev, SLEEP);

    return status;
}
1286
1287
/*
** ndo_open: allocate RX buffers, wake and re-initialise the chip, claim
** the IRQ, then start the TX/RX processes and enable interrupts.
** Returns 0 on success or a negative errno with everything rolled back.
*/
static int
de4x5_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, status = 0;
    s32 omr;

    /* Attach a real receive buffer to every RX descriptor slot. */
    for (i=0; i<lp->rxRingSize; i++) {
	if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
	    de4x5_free_rx_buffs(dev);
	    return -EAGAIN;
	}
    }

    /* Wake up the adapter (it was put to sleep at probe time). */
    yawn(dev, WAKEUP);

    /* Re-initialise the chip and autosense the media. */
    status = de4x5_init(dev);
    spin_lock_init(&lp->lock);
    lp->state = OPEN;
    de4x5_dbg_open(dev);

    if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
		                                     lp->adapter_name, dev)) {
	printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
	/* NOTE(review): this retry is byte-identical to the first attempt
	** (historically it used different IRQ flags that no longer exist),
	** so it can only succeed if the IRQ was freed in between. */
	if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
			                             lp->adapter_name, dev)) {
	    printk("\n Cannot get IRQ- reconfigure your hardware.\n");
	    disable_ast(dev);
	    de4x5_free_rx_buffs(dev);
	    de4x5_free_tx_buffs(dev);
	    yawn(dev, SLEEP);
	    lp->state = CLOSED;
	    return -EAGAIN;
	} else {
	    printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
	    printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
	}
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    netif_trans_update(dev); /* prevent tx timeout */

    START_DE4X5;

    de4x5_setup_intr(dev);

    if (de4x5_debug & DEBUG_OPEN) {
	printk("\tsts:  0x%08x\n", inl(DE4X5_STS));
	printk("\tbmr:  0x%08x\n", inl(DE4X5_BMR));
	printk("\timr:  0x%08x\n", inl(DE4X5_IMR));
	printk("\tomr:  0x%08x\n", inl(DE4X5_OMR));
	printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
	printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
	printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
	printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
    }

    return status;
}
1355
1356
1357
1358
1359
1360
1361
1362
1363
/*
** Initialise the DE4X5 operating conditions: quiesce the queue, do a
** software reset of the descriptor rings and filters, then autosense
** the connected medium.  Callers hold no lock; order is significant.
*/
static int
de4x5_init(struct net_device *dev)
{
    /* Lock out the TX path while the rings are rebuilt. */
    netif_stop_queue(dev);

    de4x5_sw_reset(dev);

    /* Autoconfigure the connected port */
    autoconf_media(dev);

    return 0;
}
1377
/*
** Software reset: reprogram the bus mode register, reload the ring base
** addresses, reset both rings (chip owns all RX slots, none of TX), and
** load a setup frame to program the perfect address filter.  The setup
** frame is polled for completion for up to 500ms; returns 0 or -EIO on
** timeout.  Register write ordering here follows the chip's requirements.
*/
static int
de4x5_sw_reset(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 bmr, omr;

    /* Select the MII or SRL port, unless the SROM infoblock drives it. */
    if (!lp->useSROM) {
	if (lp->phy[lp->active].id != 0) {
	    lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
	} else {
	    lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
	}
	de4x5_switch_mac_port(dev);
    }

    /* Burst length / cache alignment; 2114x also gets read-multiple. */
    bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
    bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
    outl(bmr, DE4X5_BMR);

    omr = inl(DE4X5_OMR) & ~OMR_PR;             /* Turn off promiscuous mode */
    if (lp->chipset == DC21140) {
	omr |= (OMR_SDP | OMR_SB);
    }
    lp->setup_f = PERFECT;
    outl(lp->dma_rings, DE4X5_RRBA);
    outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
	 DE4X5_TRBA);

    lp->rx_new = lp->rx_old = 0;
    lp->tx_new = lp->tx_old = 0;

    for (i = 0; i < lp->rxRingSize; i++) {
	lp->rx_ring[i].status = cpu_to_le32(R_OWN);
    }

    for (i = 0; i < lp->txRingSize; i++) {
	lp->tx_ring[i].status = cpu_to_le32(0);
    }

    barrier();

    /* Build and load the address filter setup frame. */
    SetMulticastFilter(dev);

    load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
    outl(omr|OMR_ST, DE4X5_OMR);

    /* Poll for the chip to release (complete) the setup frame. */
    for (j=0, i=0;(i<500) && (j==0);i++) {      /* Up to 500ms delay */
	mdelay(1);
	if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
    }
    outl(omr, DE4X5_OMR);                       /* Stop everything! */

    if (j == 0) {
	printk("%s: Setup frame timed out, status %08x\n", dev->name,
	       inl(DE4X5_STS));
	status = -EIO;
    }

    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
    lp->tx_old = lp->tx_new;

    return status;
}
1452
1453
1454
1455
/*
** ndo_start_xmit.  Packets are either loaded directly into the TX ring
** or parked in lp->cache.queue when the ring is busy or we are racing
** the interrupt handler (which drains the queue itself).  Bit 0 of
** lp->cache.lock serialises queue access between here and the ISR.
** Always returns NETDEV_TX_OK; undeliverable skbs are freed.
*/
static netdev_tx_t
de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_long flags = 0;

    netif_stop_queue(dev);
    if (!lp->tx_enable)                 /* Cannot send for now */
	goto tx_err;

    /* Reap any completed descriptors to free ring space first. */
    spin_lock_irqsave(&lp->lock, flags);
    de4x5_tx(dev);
    spin_unlock_irqrestore(&lp->lock, flags);

    /* Test if cache is already locked - requeue skb if so */
    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
	goto tx_err;

    /* Transmit descriptor ring full or stale skb */
    if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
	if (lp->interrupt) {
	    de4x5_putb_cache(dev, skb);         /* Requeue the buffer */
	} else {
	    de4x5_put_cache(dev, skb);
	}
	if (de4x5_debug & DEBUG_TX) {
	    printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
	}
    } else if (skb->len > 0) {
	/* If we already have stuff queued locally, use that first */
	if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
	    de4x5_put_cache(dev, skb);
	    skb = de4x5_get_cache(dev);
	}

	while (skb && !netif_queue_stopped(dev) &&
	       (u_long) lp->tx_skb[lp->tx_new] <= 1) {
	    spin_lock_irqsave(&lp->lock, flags);
	    netif_stop_queue(dev);
	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
	    lp->stats.tx_bytes += skb->len;
	    outl(POLL_DEMAND, DE4X5_TPD);       /* Kick the TX process */

	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;

	    if (TX_BUFFS_AVAIL) {
		netif_start_queue(dev);         /* Another pkt may be queued */
	    }
	    skb = de4x5_get_cache(dev);
	    spin_unlock_irqrestore(&lp->lock, flags);
	}
	if (skb) de4x5_putb_cache(dev, skb);
    }

    lp->cache.lock = 0;

    return NETDEV_TX_OK;
tx_err:
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
/*
** Interrupt handler.  Acknowledges and services up to 8 rounds of status
** bits (RX, TX, link fail, TX underrun); a fatal system error stops the
** chip.  Afterwards it drains any TX packets parked in lp->cache.queue.
** Re-entry is detected via the lp->interrupt flag.
*/
static irqreturn_t
de4x5_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct de4x5_private *lp;
    s32 imr, omr, sts, limit;
    u_long iobase;
    unsigned int handled = 0;

    lp = netdev_priv(dev);
    spin_lock(&lp->lock);
    iobase = dev->base_addr;

    DISABLE_IRQs;                       /* Ensure non re-entrancy */

    if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
	printk("%s: Re-entering the interrupt handler.\n", dev->name);

    synchronize_irq(dev->irq);

    for (limit=0; limit<8; limit++) {
	sts = inl(DE4X5_STS);           /* Read IRQ status */
	outl(sts, DE4X5_STS);           /* Reset the board interrupts */

	if (!(sts & lp->irq_mask)) break;       /* All done */
	handled = 1;

	if (sts & (STS_RI | STS_RU))    /* Rx interrupt (packet[s] arrived) */
	    de4x5_rx(dev);

	if (sts & (STS_TI | STS_TU))    /* Tx interrupt (packet sent) */
	    de4x5_tx(dev);

	if (sts & STS_LNF) {            /* TP Link has failed */
	    lp->irq_mask &= ~IMR_LFM;
	}

	if (sts & STS_UNF) {            /* Transmit underrun */
	    de4x5_txur(dev);
	}

	if (sts & STS_SE) {             /* Bus Error */
	    STOP_DE4X5;
	    printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
		   dev->name, sts);
	    spin_unlock(&lp->lock);
	    return IRQ_HANDLED;
	}
    }

    /* Load the TX ring with any locally stored packets */
    if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
	while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
	    de4x5_queue_pkt(de4x5_get_cache(dev), dev);
	}
	lp->cache.lock = 0;
    }

    lp->interrupt = UNMASK_INTERRUPTS;
    ENABLE_IRQs;
    spin_unlock(&lp->lock);

    return IRQ_RETVAL(handled);
}
1599
/*
** RX completion: walk the ring while the chip has released descriptors
** (status >= 0 means the OWN bit is clear), account errors, pass good
** frames to the stack, then hand the descriptors back to the chip.
** A frame may span several descriptors (RD_FS..RD_LS); lp->rx_old tracks
** the first descriptor of the frame being assembled.
*/
static int
de4x5_rx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
	                                                    entry=lp->rx_new) {
	status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

	if (lp->rx_ovf) {               /* RX overflow bug workaround */
	    if (inl(DE4X5_MFC) & MFC_FOCM) {
		de4x5_rx_ovfc(dev);
		break;
	    }
	}

	if (status & RD_FS) {           /* Remember the start of frame */
	    lp->rx_old = entry;
	}

	if (status & RD_LS) {           /* Valid frame status */
	    if (lp->tx_enable) lp->linkOK++;
	    if (status & RD_ES) {       /* There was an error. */
		lp->stats.rx_errors++;  /* Update the error stats. */
		if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
		if (status & RD_CE) lp->stats.rx_crc_errors++;
		if (status & RD_OF) lp->stats.rx_fifo_errors++;
		if (status & RD_TL) lp->stats.rx_length_errors++;
		if (status & RD_RF) lp->pktStats.rx_runt_frames++;
		if (status & RD_CS) lp->pktStats.rx_collision++;
		if (status & RD_DB) lp->pktStats.rx_dribble++;
		if (status & RD_OF) lp->pktStats.rx_overflow++;
	    } else {                    /* A valid frame received */
		struct sk_buff *skb;
		/* Frame length is in status[31:16]; strip the 4-byte FCS. */
		short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
					                            >> 16) - 4;

		if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
		    printk("%s: Insufficient memory; nuking packet.\n",
			   dev->name);
		    lp->stats.rx_dropped++;
		} else {
		    de4x5_dbg_rx(skb, pkt_len);

		    /* Push up the protocol stack */
		    skb->protocol=eth_type_trans(skb,dev);
		    de4x5_local_stats(dev, skb->data, pkt_len);
		    netif_rx(skb);

		    /* Update stats */
		    lp->stats.rx_packets++;
		    lp->stats.rx_bytes += pkt_len;
		}
	    }

	    /* Return every descriptor of this frame to the chip. */
	    for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
		lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
		barrier();
	    }
	    lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
	    barrier();
	}

	/* Advance to the next descriptor. */
	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
    }

    return 0;
}
1675
1676static inline void
1677de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1678{
1679 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1680 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1681 DMA_TO_DEVICE);
1682 if ((u_long) lp->tx_skb[entry] > 1)
1683 dev_kfree_skb_irq(lp->tx_skb[entry]);
1684 lp->tx_skb[entry] = NULL;
1685}
1686
1687
1688
1689
/*
** TX completion handler: reap transmitted descriptors between tx_old and
** tx_new, accumulate error/collision statistics, free the buffers and
** restart the queue when ring space becomes available.
*/
static int
de4x5_tx(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int entry;
    s32 status;

    for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
	status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
	if (status < 0) {                      /* Chip still owns it (T_OWN) */
	    break;
	} else if (status != 0x7fffffff) {     /* Not an intermediate copy */
	    if (status & TD_ES) {              /* An error happened */
		lp->stats.tx_errors++;
		if (status & TD_NC) lp->stats.tx_carrier_errors++;
		if (status & TD_LC) lp->stats.tx_window_errors++;
		if (status & TD_UF) lp->stats.tx_fifo_errors++;
		if (status & TD_EC) lp->pktStats.excessive_collisions++;
		if (status & TD_DE) lp->stats.tx_aborted_errors++;

		/* Kick the chip again if more packets are queued. */
		if (TX_PKT_PENDING) {
		    outl(POLL_DEMAND, DE4X5_TPD); /* Restart the TX side */
		}
	    } else {                           /* Packet sent cleanly */
		lp->stats.tx_packets++;
		if (lp->tx_enable) lp->linkOK++;
	    }

	    /* Excessive collisions count as 16; otherwise use the
	    ** collision-count field from the descriptor status.
	    */
	    lp->stats.collisions += ((status & TD_EC) ? 16 :
				     ((status & TD_CC) >> 3));

	    /* Free the buffer. */
	    if (lp->tx_skb[entry] != NULL)
		de4x5_free_tx_buff(lp, entry);
	}

	/* Update all the pointers */
	lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
    }

    /* Any room left in the ring? Restart the queue (wake vs. start depends
    ** on whether we are inside the interrupt handler).
    */
    if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
	if (lp->interrupt)
	    netif_wake_queue(dev);
	else
	    netif_start_queue(dev);
    }

    return 0;
}
1741
1742static void
1743de4x5_ast(struct timer_list *t)
1744{
1745 struct de4x5_private *lp = from_timer(lp, t, timer);
1746 struct net_device *dev = dev_get_drvdata(lp->gendev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766}
1767
/*
** Transmit underrun recovery: stop the TX/RX processes, raise the transmit
** threshold (or switch to store-and-forward once maxed out) and restart.
*/
static int
de4x5_txur(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int omr;

    omr = inl(DE4X5_OMR);
    /* Only adjust if not already in store-and-forward mode, except on the
    ** DC21040/DC21041 which need the restart regardless.
    */
    if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
	omr &= ~(OMR_ST|OMR_SR);               /* Stop TX and RX */
	outl(omr, DE4X5_OMR);
	while (inl(DE4X5_STS) & STS_TS);       /* Busy-wait for TX to stop */
	if ((omr & OMR_TR) < OMR_TR) {
	    omr += 0x4000;                     /* Bump the TX threshold field */
	} else {
	    omr |= OMR_SF;                     /* Threshold maxed: use S&F */
	}
	outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); /* Restart TX and RX */
    }

    return 0;
}
1790
1791static int
1792de4x5_rx_ovfc(struct net_device *dev)
1793{
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810}
1811
/*
** Shut the interface down: stop autosensing and the TX queue, quiesce the
** chip, release the IRQ and ring buffers, then put the chip to sleep.
*/
static int
de4x5_close(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 imr, omr;

    disable_ast(dev);                          /* Kill the autosense timer */

    netif_stop_queue(dev);

    if (de4x5_debug & DEBUG_CLOSE) {
	printk("%s: Shutting down ethercard, status was %8.8x.\n",
	       dev->name, inl(DE4X5_STS));
    }

    /*
    ** Stop the chip before releasing the interrupt: no more IRQs and no
    ** more DMA after this point.
    */
    DISABLE_IRQs;
    STOP_DE4X5;

    /* Free the associated irq */
    free_irq(dev->irq, dev);
    lp->state = CLOSED;

    /* Free any socket buffers left in the descriptor rings */
    de4x5_free_rx_buffs(dev);
    de4x5_free_tx_buffs(dev);

    /* Put the adapter to sleep to save power */
    yawn(dev, SLEEP);

    return 0;
}
1847
/*
** Return the device statistics, refreshing the missed-frame count from the
** chip's missed frame counter register first.
*/
static struct net_device_stats *
de4x5_get_stats(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));

    return &lp->stats;
}
1858
1859static void
1860de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861{
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++;
1882 if (lp->pktStats.bins[0] == 0) {
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
/*
** Fill in a transmit descriptor: map the buffer for DMA, set the control
** flags, and only then hand ownership to the chip. The barrier() calls keep
** the compiler from reordering the T_OWN write before the descriptor setup.
*/
static void
load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
{
    struct de4x5_private *lp = netdev_priv(dev);
    /* The previous ring entry (with wraparound) — its interrupt-on-complete
    ** bit is cleared below so only the newest descriptor raises a TX IRQ.
    */
    int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
    dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);

    lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
    lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER); /* Keep only ring-end bit */
    lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
    lp->tx_skb[lp->tx_new] = skb;
    lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
    barrier();

    /* Descriptor fully set up: give it to the chip last. */
    lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
    barrier();
}
1912
1913
1914
1915
1916static void
1917set_multicast_list(struct net_device *dev)
1918{
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) {
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD);
1936 netif_trans_update(dev);
1937 }
1938 }
1939}
1940
1941
1942
1943
1944
1945
/*
** Build the multicast filter into the setup frame. Three modes:
** pass-all-multicast (too many addresses or IFF_ALLMULTI), a 512-bit hash
** filter (HASH_PERF) keyed by the low 9 bits of the little-endian CRC, or
** a perfect filter listing each address.
*/
static void
SetMulticastFilter(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct netdev_hw_addr *ha;
    u_long iobase = dev->base_addr;
    int i, bit, byte;
    u16 hashcode;
    u32 omr, crc;
    char *pa;
    unsigned char *addrs;

    omr = inl(DE4X5_OMR);
    omr &= ~(OMR_PR | OMR_PM);
    pa = build_setup_frame(dev, ALL);          /* Build the basic frame */

    if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
	omr |= OMR_PM;                         /* Pass all multicasts */
    } else if (lp->setup_f == HASH_PERF) {
	netdev_for_each_mc_addr(ha, dev) {
	    crc = ether_crc_le(ETH_ALEN, ha->addr);
	    hashcode = crc & DE4X5_HASH_BITS;  /* hashcode is 9 LSb of CRC */

	    byte = hashcode >> 3;              /* bit[3-8] -> byte in filter */
	    bit = 1 << (hashcode & 0x07);      /* bit[0-2] -> bit in byte */

	    /* The setup frame stores the filter 16 bits per 32-bit word, so
	    ** remap the byte index into the frame layout.
	    */
	    byte <<= 1;
	    if (byte & 0x02) {
		byte -= 1;
	    }
	    lp->setup_frame[byte] |= bit;
	}
    } else {                                   /* Perfect filtering */
	netdev_for_each_mc_addr(ha, dev) {
	    addrs = ha->addr;
	    /* Addresses are packed two bytes per 32-bit setup frame word. */
	    for (i=0; i<ETH_ALEN; i++) {
		*(pa + (i&1)) = *addrs++;
		if (i & 0x01) pa += 4;
	    }
	}
    }
    outl(omr, DE4X5_OMR);
}
1989
1990#ifdef CONFIG_EISA
1991
/* IRQ lines an EISA board may be strapped to, indexed by the IRQ select
** bits read from the board's configuration register.
*/
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
/*
** Probe one EISA slot for a DE425 board: claim both I/O regions, identify
** the chip via the emulated PCI ID registers, set up interrupts and bus
** mastering, read the station address and hand off to de4x5_hw_init().
** On failure the claimed resources are released via the goto chain.
*/
static int de4x5_eisa_probe(struct device *gendev)
{
    struct eisa_device *edev;
    u_long iobase;
    u_char irq, regval;
    u_short vendor;
    u32 cfid;
    int status, device;
    struct net_device *dev;
    struct de4x5_private *lp;

    edev = to_eisa_device (gendev);
    iobase = edev->base_addr;

    if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
	return -EBUSY;

    if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
			 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
	status = -EBUSY;
	goto release_reg_1;
    }

    if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
	status = -ENOMEM;
	goto release_reg_2;
    }
    lp = netdev_priv(dev);

    /* EISA boards mirror the PCI configuration ID/revision registers. */
    cfid = (u32) inl(PCI_CFID);
    lp->cfrv = (u_short) inl(PCI_CFRV);
    device = (cfid >> 8) & 0x00ffff00;
    vendor = (u_short) cfid;

    /* Read the EISA Configuration Registers */
    regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
#ifdef CONFIG_ALPHA
    /* Looks like the Alpha firmware may not program these board registers,
    ** so force a sane setup here: release the I/O bus lock, enable burst
    ** reads/writes and edge-triggered interrupts.
    */
    outb (ER1_IAM | 1, EISA_REG1);             /* Assert the I/O bus lock */
    mdelay (1);

    outb (ER1_IAM, EISA_REG1);                 /* De-assert the lock */
    mdelay (1);

    /* Enable burst write and read on the bus */
    outb (ER3_BWE | ER3_BRE, EISA_REG3);

    /* Set byte-swap, burst modes and edge-triggered interrupts */
    outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
#endif
    irq = de4x5_irq[(regval >> 1) & 0x03];     /* Map select bits -> IRQ line */

    if (is_DC2114x) {
	device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
    }
    lp->chipset = device;
    lp->bus = EISA;

    /* Enable I/O access and bus mastering, set the latency timer and map
    ** the chip's base I/O address.
    */
    outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
    outl(0x00006000, PCI_CFLT);
    outl(iobase, PCI_CBIO);

    DevicePresent(dev, EISA_APROM);            /* Read the station address */

    dev->irq = irq;

    if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
	return 0;
    }

    free_netdev (dev);
 release_reg_2:
    release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
 release_reg_1:
    release_region (iobase, DE4X5_EISA_TOTAL_SIZE);

    return status;
}
2079
/*
** Undo de4x5_eisa_probe(): unregister and free the net device, then release
** both claimed I/O regions.
*/
static int de4x5_eisa_remove(struct device *device)
{
    struct net_device *dev;
    u_long iobase;

    dev = dev_get_drvdata(device);
    iobase = dev->base_addr;

    unregister_netdev (dev);
    free_netdev (dev);
    release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
    release_region (iobase, DE4X5_EISA_TOTAL_SIZE);

    return 0;
}
2095
/* EISA ID table: match the DEC DE425 ("DEC4250") board. */
static const struct eisa_device_id de4x5_eisa_ids[] = {
        { "DEC4250", 0 },	/* 0 is the board name index. */
        { "" }
};
MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
/* EISA bus driver glue for the DE425 adapter. */
static struct eisa_driver de4x5_eisa_driver = {
        .id_table = de4x5_eisa_ids,
        .driver   = {
                .name    = "de4x5",
                .probe   = de4x5_eisa_probe,
                .remove  = de4x5_eisa_remove,
        }
};
2110#endif
2111
2112#ifdef CONFIG_PCI
2113
2114
2115
2116
2117
2118
2119
/*
** Search the PCI bus this device sits on for the first DECchip whose SROM
** contains a usable station address, and record its details in the
** file-scope 'last' structure (used to handle shared-SROM multiport cards).
*/
static void
srom_search(struct net_device *dev, struct pci_dev *pdev)
{
    u_char pb;
    u_short vendor, status;
    u_int irq = 0, device;
    u_long iobase = 0;
    int i, j;
    struct de4x5_private *lp = netdev_priv(dev);
    struct pci_dev *this_dev;

    list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
	vendor = this_dev->vendor;
	device = this_dev->device << 8;
	/* Skip anything that isn't a supported DECchip. */
	if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;

	/* Get the chip configuration revision register */
	pb = this_dev->bus->number;

	/* Set the device number information */
	lp->device = PCI_SLOT(this_dev->devfn);
	lp->bus_num = pb;

	/* Set the chipset information */
	if (is_DC2114x) {
	    device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
		      ? DC21142 : DC21143);
	}
	lp->chipset = device;

	/* Get the board I/O address */
	iobase = pci_resource_start(this_dev, 0);

	/* Fetch the IRQ to be used */
	irq = this_dev->irq;
	if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;

	/* Check if I/O accesses are enabled */
	pci_read_config_word(this_dev, PCI_COMMAND, &status);
	if (!(status & PCI_COMMAND_IO)) continue;

	/* Search for a valid SROM address: reject all-zero and all-0xff. */
	DevicePresent(dev, DE4X5_APROM);
	for (j=0, i=0; i<ETH_ALEN; i++) {
	    j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
	}
	if (j != 0 && j != 6 * 0xff) {
	    last.chipset = device;
	    last.bus = pb;
	    last.irq = irq;
	    for (i=0; i<ETH_ALEN; i++) {
		last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
	    }
	    return;
	}
    }
}
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
/*
** Probe one PCI function for a supported DECchip: honour the module 'io'
** parameter (bus<<8 | slot filter), identify the chipset, verify IRQ and
** I/O / bus-master enables (forcing them on where safe), bump the latency
** timer, read the station address and hand off to de4x5_hw_init().
** Resources are unwound via the goto chain on any failure.
*/
static int de4x5_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
    u_char pb, pbus = 0, dev_num, dnum = 0, timer;
    u_short vendor, status;
    u_int irq = 0, device;
    u_long iobase = 0;
    int error;
    struct net_device *dev;
    struct de4x5_private *lp;

    dev_num = PCI_SLOT(pdev->devfn);
    pb = pdev->bus->number;

    /* 'io' encodes a specific bus/slot the user wants to bind to. */
    if (io) {
	pbus = (u_short)(io >> 8);
	dnum = (u_short)(io & 0xff);
	if ((pbus != pb) || (dnum != dev_num))
	    return -ENODEV;
    }

    vendor = pdev->vendor;
    device = pdev->device << 8;
    if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
	return -ENODEV;

    /* Ok, the device seems to be for us. */
    if ((error = pci_enable_device (pdev)))
	return error;

    if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
	error = -ENOMEM;
	goto disable_dev;
    }

    lp = netdev_priv(dev);
    lp->bus = PCI;
    lp->bus_num = 0;

    /* Search for an SROM on this bus (handles multiport boards that share
    ** one SROM).
    */
    if (lp->bus_num != pb) {
	lp->bus_num = pb;
	srom_search(dev, pdev);
    }

    /* Get the chip configuration revision register */
    lp->cfrv = pdev->revision;

    /* Set the device number information */
    lp->device = dev_num;
    lp->bus_num = pb;

    /* Set the chipset information */
    if (is_DC2114x) {
	device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
    }
    lp->chipset = device;

    /* Get the board I/O address */
    iobase = pci_resource_start(pdev, 0);

    /* Fetch the IRQ to be used */
    irq = pdev->irq;
    if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
	error = -ENODEV;
	goto free_dev;
    }

    /* Check if I/O accesses and Bus Mastering are enabled */
    pci_read_config_word(pdev, PCI_COMMAND, &status);
#ifdef __powerpc__
    /* Some PPC firmware leaves I/O access disabled; try to enable it. */
    if (!(status & PCI_COMMAND_IO)) {
	status |= PCI_COMMAND_IO;
	pci_write_config_word(pdev, PCI_COMMAND, status);
	pci_read_config_word(pdev, PCI_COMMAND, &status);
    }
#endif
    if (!(status & PCI_COMMAND_IO)) {
	error = -ENODEV;
	goto free_dev;
    }

    if (!(status & PCI_COMMAND_MASTER)) {
	status |= PCI_COMMAND_MASTER;
	pci_write_config_word(pdev, PCI_COMMAND, status);
	pci_read_config_word(pdev, PCI_COMMAND, &status);
    }
    if (!(status & PCI_COMMAND_MASTER)) {
	error = -ENODEV;
	goto free_dev;
    }

    /* Check the latency timer for values >= 0x60 */
    pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
    if (timer < 0x60) {
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
    }

    DevicePresent(dev, DE4X5_APROM);           /* Read the station address */

    if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
	error = -EBUSY;
	goto free_dev;
    }

    dev->irq = irq;

    if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
	goto release;
    }

    return 0;

 release:
    release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
 free_dev:
    free_netdev (dev);
 disable_dev:
    pci_disable_device (pdev);
    return error;
}
2315
/*
** Undo de4x5_pci_probe(): unregister and free the net device, release the
** I/O region and disable the PCI device.
*/
static void de4x5_pci_remove(struct pci_dev *pdev)
{
    struct net_device *dev;
    u_long iobase;

    dev = pci_get_drvdata(pdev);
    iobase = dev->base_addr;

    unregister_netdev (dev);
    free_netdev (dev);
    release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
    pci_disable_device (pdev);
}
2329
/* PCI ID table: the four supported DECchip Tulip generations. */
static const struct pci_device_id de4x5_pci_tbl[] = {
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },	/* DC21040 */
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },	/* DC21041 */
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },	/* DC21140 */
        { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },	/* DC21142/21143 */
        { },
};
2341
/* PCI bus driver glue for the DE434/DE435/DE450/DE500 adapters. */
static struct pci_driver de4x5_pci_driver = {
        .name           = "de4x5",
        .id_table       = de4x5_pci_tbl,
        .probe          = de4x5_pci_probe,
	.remove         = de4x5_pci_remove,
};
2348
2349#endif
2350
2351
2352
2353
2354
2355
2356
2357
/*
** Kick off media autodetection: stop any running autosense timer, reset the
** media state machine and run its first step synchronously via de4x5_ast().
** Returns the media state reached after that first step.
*/
static int
autoconf_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    disable_ast(dev);

    lp->c_media = AUTO;                        /* Bogus last media */
    inl(DE4X5_MFC);                            /* Zero the missed-frame counter */
    lp->media = INIT;
    lp->tcount = 0;

    de4x5_ast(&lp->timer);

    return lp->media;
}
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
/*
** DC21040 media autosense state machine. Each invocation advances one state
** (TP -> BNC/AUI -> external SIA -> no-connection) and returns the delay in
** ms before the next step should run (via the autosense timer).
*/
static int
dc21040_autoconf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int next_tick = DE4X5_AUTOSENSE_MS;
    s32 imr;

    switch (lp->media) {
    case INIT:
	DISABLE_IRQs;
	lp->tx_enable = false;
	lp->timeout = -1;
	de4x5_save_skbs(dev);                  /* Save non-transmitted skbs */
	/* Choose the first medium to try from the autosense setting. */
	if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
	    lp->media = TP;
	} else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
	    lp->media = BNC_AUI;
	} else if (lp->autosense == EXT_SIA) {
	    lp->media = EXT_SIA;
	} else {
	    lp->media = NC;
	}
	lp->local_state = 0;
	next_tick = dc21040_autoconf(dev);     /* Run the first step now */
	break;

    case TP:
	next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
				  TP_SUSPECT, test_tp);
	break;

    case TP_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
	break;

    case BNC:
    case AUI:
    case BNC_AUI:
	next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
				  BNC_AUI_SUSPECT, ping_media);
	break;

    case BNC_AUI_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
	break;

    case EXT_SIA:
	next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
				  NC, EXT_SIA_SUSPECT, ping_media);
	break;

    case EXT_SIA_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
	break;

    case NC:
	/* No connection found: default to TP SIA settings and start over. */
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tx_enable = false;
	break;
    }

    return next_tick;
}
2458
/*
** Generic DC21040 per-medium probe step. local_state 0 programs the SIA for
** the medium; local_state 1 runs the supplied link test 'fn' and either
** brings the connection up, moves on to 'next_state', or (once running)
** drops into 'suspect_state' when the link appears dead.
** Returns the ms delay until the next timer step; a negative fn() result
** encodes "test still in progress" with TIMER_CB flagging the callback.
*/
static int
dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
	      int next_state, int suspect_state,
	      int (*fn)(struct net_device *, int))
{
    struct de4x5_private *lp = netdev_priv(dev);
    int next_tick = DE4X5_AUTOSENSE_MS;
    int linkBad;

    switch (lp->local_state) {
    case 0:
	reset_init_sia(dev, csr13, csr14, csr15);
	lp->local_state++;
	next_tick = 500;                       /* Let the SIA settle */
	break;

    case 1:
	if (!lp->tx_enable) {
	    linkBad = fn(dev, timeout);
	    if (linkBad < 0) {
		next_tick = linkBad & ~TIMER_CB; /* Test pending: reschedule */
	    } else {
		if (linkBad && (lp->autosense == AUTO)) {
		    lp->local_state = 0;       /* Try the next medium */
		    lp->media = next_state;
		} else {
		    de4x5_init_connection(dev);
		}
	    }
	} else if (!lp->linkOK && (lp->autosense == AUTO)) {
	    lp->media = suspect_state;         /* Running but no link seen */
	    next_tick = 3000;
	}
	break;
    }

    return next_tick;
}
2497
2498static int
2499de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2500 int (*fn)(struct net_device *, int),
2501 int (*asfn)(struct net_device *))
2502{
2503 struct de4x5_private *lp = netdev_priv(dev);
2504 int next_tick = DE4X5_AUTOSENSE_MS;
2505 int linkBad;
2506
2507 switch (lp->local_state) {
2508 case 1:
2509 if (lp->linkOK) {
2510 lp->media = prev_state;
2511 } else {
2512 lp->local_state++;
2513 next_tick = asfn(dev);
2514 }
2515 break;
2516
2517 case 2:
2518 linkBad = fn(dev, timeout);
2519 if (linkBad < 0) {
2520 next_tick = linkBad & ~TIMER_CB;
2521 } else if (!linkBad) {
2522 lp->local_state--;
2523 lp->media = prev_state;
2524 } else {
2525 lp->media = INIT;
2526 lp->tcount++;
2527 }
2528 }
2529
2530 return next_tick;
2531}
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
/*
** DC21041 media autosense state machine (TP with link negotiation, AUI,
** BNC). Each invocation advances one state and returns the ms delay before
** the next timer step. test_media()/ping_media() return negative while a
** test is still pending (TIMER_CB encoded in the value).
*/
static int
dc21041_autoconf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 sts, irqs, irq_mask, imr, omr;
    int next_tick = DE4X5_AUTOSENSE_MS;

    switch (lp->media) {
    case INIT:
	DISABLE_IRQs;
	lp->tx_enable = false;
	lp->timeout = -1;
	de4x5_save_skbs(dev);                  /* Save non-transmitted skbs */
	/* Pick the first medium to try from the autosense setting. */
	if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
	    lp->media = TP;
	} else if (lp->autosense == TP) {
	    lp->media = TP;
	} else if (lp->autosense == BNC) {
	    lp->media = BNC;
	} else if (lp->autosense == AUI) {
	    lp->media = AUI;
	} else {
	    lp->media = NC;
	}
	lp->local_state = 0;
	next_tick = dc21041_autoconf(dev);     /* Run the first step now */
	break;

    case TP_NW:
	/* TP with Nway: enable full duplex and watch for link pass/fail. */
	if (lp->timeout < 0) {
	    omr = inl(DE4X5_OMR);
	    outl(omr | OMR_FDX, DE4X5_OMR);
	}
	irqs = STS_LNF | STS_LNP;
	irq_mask = IMR_LFM | IMR_LPM;
	sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
	if (sts < 0) {
	    next_tick = sts & ~TIMER_CB;
	} else {
	    if (sts & STS_LNP) {
		lp->media = ANS;               /* Link pass: autonegotiate */
	    } else {
		lp->media = AUI;
	    }
	    next_tick = dc21041_autoconf(dev);
	}
	break;

    case ANS:
	if (!lp->tx_enable) {
	    irqs = STS_LNP;
	    irq_mask = IMR_LPM;
	    sts = test_ans(dev, irqs, irq_mask, 3000);
	    if (sts < 0) {
		next_tick = sts & ~TIMER_CB;
	    } else {
		if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
		    lp->media = TP;            /* Negotiation failed: plain TP */
		    next_tick = dc21041_autoconf(dev);
		} else {
		    lp->local_state = 1;
		    de4x5_init_connection(dev);
		}
	    }
	} else if (!lp->linkOK && (lp->autosense == AUTO)) {
	    lp->media = ANS_SUSPECT;
	    next_tick = 3000;
	}
	break;

    case ANS_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
	break;

    case TP:
	if (!lp->tx_enable) {
	    if (lp->timeout < 0) {
		omr = inl(DE4X5_OMR);          /* Half duplex for plain TP */
		outl(omr & ~OMR_FDX, DE4X5_OMR);
	    }
	    irqs = STS_LNF | STS_LNP;
	    irq_mask = IMR_LFM | IMR_LPM;
	    sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
	    if (sts < 0) {
		next_tick = sts & ~TIMER_CB;
	    } else {
		if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
		    /* SIA non-selected-port activity decides AUI vs BNC. */
		    if (inl(DE4X5_SISR) & SISR_NRA) {
			lp->media = AUI;
		    } else {
			lp->media = BNC;
		    }
		    next_tick = dc21041_autoconf(dev);
		} else {
		    lp->local_state = 1;
		    de4x5_init_connection(dev);
		}
	    }
	} else if (!lp->linkOK && (lp->autosense == AUTO)) {
	    lp->media = TP_SUSPECT;
	    next_tick = 3000;
	}
	break;

    case TP_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
	break;

    case AUI:
	if (!lp->tx_enable) {
	    if (lp->timeout < 0) {
		omr = inl(DE4X5_OMR);          /* Half duplex for AUI */
		outl(omr & ~OMR_FDX, DE4X5_OMR);
	    }
	    irqs = 0;
	    irq_mask = 0;
	    sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
	    if (sts < 0) {
		next_tick = sts & ~TIMER_CB;
	    } else {
		/* No AUI receive activity: fall back to BNC. */
		if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
		    lp->media = BNC;
		    next_tick = dc21041_autoconf(dev);
		} else {
		    lp->local_state = 1;
		    de4x5_init_connection(dev);
		}
	    }
	} else if (!lp->linkOK && (lp->autosense == AUTO)) {
	    lp->media = AUI_SUSPECT;
	    next_tick = 3000;
	}
	break;

    case AUI_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
	break;

    case BNC:
	switch (lp->local_state) {
	case 0:
	    if (lp->timeout < 0) {
		omr = inl(DE4X5_OMR);          /* Half duplex for BNC */
		outl(omr & ~OMR_FDX, DE4X5_OMR);
	    }
	    irqs = 0;
	    irq_mask = 0;
	    sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
	    if (sts < 0) {
		next_tick = sts & ~TIMER_CB;
	    } else {
		lp->local_state++;             /* Ensure media connected */
		next_tick = dc21041_autoconf(dev);
	    }
	    break;

	case 1:
	    if (!lp->tx_enable) {
		/* BNC has no link status bit: ping to verify connectivity. */
		if ((sts = ping_media(dev, 3000)) < 0) {
		    next_tick = sts & ~TIMER_CB;
		} else {
		    if (sts) {
			lp->local_state = 0;
			lp->media = NC;        /* Ping failed: no connection */
		    } else {
			de4x5_init_connection(dev);
		    }
		}
	    } else if (!lp->linkOK && (lp->autosense == AUTO)) {
		lp->media = BNC_SUSPECT;
		next_tick = 3000;
	    }
	    break;
	}
	break;

    case BNC_SUSPECT:
	next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
	break;

    case NC:
	/* No connection: restore default TP SIA settings and restart. */
	omr = inl(DE4X5_OMR);
	outl(omr | OMR_FDX, DE4X5_OMR);
	reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tx_enable = false;
	break;
    }

    return next_tick;
}
2738
2739
2740
2741
2742
2743
/*
** DC21140(A) media autosense state machine, driven through an MII PHY (or
** SROM info blocks when available): reset the PHY, optionally run Nway
** autonegotiation, then detect 100Mb vs 10Mb operation. Returns the ms
** delay before the next autosense timer step.
*/
static int
dc21140m_autoconf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int ana, anlpa, cap, cr, slnk, sr;
    int next_tick = DE4X5_AUTOSENSE_MS;
    u_long imr, omr, iobase = dev->base_addr;

    switch(lp->media) {
    case INIT:
	if (lp->timeout < 0) {
	    DISABLE_IRQs;
	    lp->tx_enable = false;
	    lp->linkOK = 0;
	    de4x5_save_skbs(dev);              /* Save non-transmitted skbs */
	}
	if ((next_tick = de4x5_reset_phy(dev)) < 0) {
	    next_tick &= ~TIMER_CB;            /* PHY reset still pending */
	} else {
	    if (lp->useSROM) {
		/* Media selection driven by the SROM info blocks. */
		if (srom_map_media(dev) < 0) {
		    lp->tcount++;
		    return next_tick;
		}
		srom_exec(dev, lp->phy[lp->active].gep);
		if (lp->infoblock_media == ANS) {
		    ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
		    mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		}
	    } else {
		lp->tmp = MII_SR_ASSC;         /* Fake out the MII speed set */
		SET_10Mb;
		if (lp->autosense == _100Mb) {
		    lp->media = _100Mb;
		} else if (lp->autosense == _10Mb) {
		    lp->media = _10Mb;
		} else if ((lp->autosense == AUTO) &&
			   ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
		    /* PHY can autonegotiate: advertise our abilities. */
		    ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
		    ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
		    mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		    lp->media = ANS;
		} else if (lp->autosense == AUTO) {
		    lp->media = SPD_DET;       /* No ANC: parallel detect */
		} else if (is_spd_100(dev) && is_100_up(dev)) {
		    lp->media = _100Mb;
		} else {
		    lp->media = NC;
		}
	    }
	    lp->local_state = 0;
	    next_tick = dc21140m_autoconf(dev);
	}
	break;

    case ANS:
	switch (lp->local_state) {
	case 0:
	    if (lp->timeout < 0) {
		/* Restart the autonegotiation process in the PHY. */
		mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
	    }
	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
	    if (cr < 0) {
		next_tick = cr & ~TIMER_CB;    /* Restart-bit poll pending */
	    } else {
		if (cr) {
		    lp->local_state = 0;
		    lp->media = SPD_DET;       /* Restart never cleared */
		} else {
		    lp->local_state++;         /* Wait for completion */
		}
		next_tick = dc21140m_autoconf(dev);
	    }
	    break;

	case 1:
	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
		next_tick = sr & ~TIMER_CB;
	    } else {
		lp->media = SPD_DET;
		lp->local_state = 0;
		if (sr) {                      /* Negotiation completed */
		    lp->tmp = MII_SR_ASSC;
		    anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
		    ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
		    /* Pick the best common ability (no remote fault). */
		    if (!(anlpa & MII_ANLPA_RF) &&
			(cap = anlpa & MII_ANLPA_TAF & ana)) {
			if (cap & MII_ANA_100M) {
			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
			    lp->media = _100Mb;
			} else if (cap & MII_ANA_10M) {
			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;

			    lp->media = _10Mb;
			}
		    }
		}                              /* Else still SPD_DET */
		next_tick = dc21140m_autoconf(dev);
	    }
	    break;
	}
	break;

    case SPD_DET:                              /* Parallel speed detection */
	if (lp->timeout < 0) {
	    /* Pick the link-status source: MII SR for a real PHY, the GEP
	    ** link-not-present pin otherwise.
	    */
	    lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
		                                (~gep_rd(dev) & GEP_LNP));
	    SET_100Mb_PDET;
	}
	if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
	    next_tick = slnk & ~TIMER_CB;      /* Detection poll pending */
	} else {
	    if (is_spd_100(dev) && is_100_up(dev)) {
		lp->media = _100Mb;
	    } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
		lp->media = _10Mb;
	    } else {
		lp->media = NC;
	    }
	    next_tick = dc21140m_autoconf(dev);
	}
	break;

    case _100Mb:                               /* Set 100Mb/s */
	next_tick = 3000;
	if (!lp->tx_enable) {
	    SET_100Mb;
	    de4x5_init_connection(dev);
	} else {
	    /* Watchdog: link vanished, restart autosense. */
	    if (!lp->linkOK && (lp->autosense == AUTO)) {
		if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
		    lp->media = INIT;
		    lp->tcount++;
		    next_tick = DE4X5_AUTOSENSE_MS;
		}
	    }
	}
	break;

    case BNC:
    case AUI:
    case _10Mb:                                /* Set 10Mb/s */
	next_tick = 3000;
	if (!lp->tx_enable) {
	    SET_10Mb;
	    de4x5_init_connection(dev);
	} else {
	    /* Watchdog: link vanished, restart autosense. */
	    if (!lp->linkOK && (lp->autosense == AUTO)) {
		if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
		    lp->media = INIT;
		    lp->tcount++;
		    next_tick = DE4X5_AUTOSENSE_MS;
		}
	    }
	}
	break;

    case NC:
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tx_enable = false;
	break;
    }

    return next_tick;
}
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928static int
2929dc2114x_autoconf(struct net_device *dev)
2930{
2931 struct de4x5_private *lp = netdev_priv(dev);
2932 u_long iobase = dev->base_addr;
2933 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2934 int next_tick = DE4X5_AUTOSENSE_MS;
2935
2936 switch (lp->media) {
2937 case INIT:
2938 if (lp->timeout < 0) {
2939 DISABLE_IRQs;
2940 lp->tx_enable = false;
2941 lp->linkOK = 0;
2942 lp->timeout = -1;
2943 de4x5_save_skbs(dev);
2944 if (lp->params.autosense & ~AUTO) {
2945 srom_map_media(dev);
2946 if (lp->media != lp->params.autosense) {
2947 lp->tcount++;
2948 lp->media = INIT;
2949 return next_tick;
2950 }
2951 lp->media = INIT;
2952 }
2953 }
2954 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2955 next_tick &= ~TIMER_CB;
2956 } else {
2957 if (lp->autosense == _100Mb) {
2958 lp->media = _100Mb;
2959 } else if (lp->autosense == _10Mb) {
2960 lp->media = _10Mb;
2961 } else if (lp->autosense == TP) {
2962 lp->media = TP;
2963 } else if (lp->autosense == BNC) {
2964 lp->media = BNC;
2965 } else if (lp->autosense == AUI) {
2966 lp->media = AUI;
2967 } else {
2968 lp->media = SPD_DET;
2969 if ((lp->infoblock_media == ANS) &&
2970 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2971 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2972 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2973 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2974 lp->media = ANS;
2975 }
2976 }
2977 lp->local_state = 0;
2978 next_tick = dc2114x_autoconf(dev);
2979 }
2980 break;
2981
2982 case ANS:
2983 switch (lp->local_state) {
2984 case 0:
2985 if (lp->timeout < 0) {
2986 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2987 }
2988 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2989 if (cr < 0) {
2990 next_tick = cr & ~TIMER_CB;
2991 } else {
2992 if (cr) {
2993 lp->local_state = 0;
2994 lp->media = SPD_DET;
2995 } else {
2996 lp->local_state++;
2997 }
2998 next_tick = dc2114x_autoconf(dev);
2999 }
3000 break;
3001
3002 case 1:
3003 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3004 if (sr < 0) {
3005 next_tick = sr & ~TIMER_CB;
3006 } else {
3007 lp->media = SPD_DET;
3008 lp->local_state = 0;
3009 if (sr) {
3010 lp->tmp = MII_SR_ASSC;
3011 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3012 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3013 if (!(anlpa & MII_ANLPA_RF) &&
3014 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3015 if (cap & MII_ANA_100M) {
3016 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3017 lp->media = _100Mb;
3018 } else if (cap & MII_ANA_10M) {
3019 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3020 lp->media = _10Mb;
3021 }
3022 }
3023 }
3024 next_tick = dc2114x_autoconf(dev);
3025 }
3026 break;
3027 }
3028 break;
3029
3030 case AUI:
3031 if (!lp->tx_enable) {
3032 if (lp->timeout < 0) {
3033 omr = inl(DE4X5_OMR);
3034 outl(omr & ~OMR_FDX, DE4X5_OMR);
3035 }
3036 irqs = 0;
3037 irq_mask = 0;
3038 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3039 if (sts < 0) {
3040 next_tick = sts & ~TIMER_CB;
3041 } else {
3042 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3043 lp->media = BNC;
3044 next_tick = dc2114x_autoconf(dev);
3045 } else {
3046 lp->local_state = 1;
3047 de4x5_init_connection(dev);
3048 }
3049 }
3050 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3051 lp->media = AUI_SUSPECT;
3052 next_tick = 3000;
3053 }
3054 break;
3055
3056 case AUI_SUSPECT:
3057 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3058 break;
3059
3060 case BNC:
3061 switch (lp->local_state) {
3062 case 0:
3063 if (lp->timeout < 0) {
3064 omr = inl(DE4X5_OMR);
3065 outl(omr & ~OMR_FDX, DE4X5_OMR);
3066 }
3067 irqs = 0;
3068 irq_mask = 0;
3069 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3070 if (sts < 0) {
3071 next_tick = sts & ~TIMER_CB;
3072 } else {
3073 lp->local_state++;
3074 next_tick = dc2114x_autoconf(dev);
3075 }
3076 break;
3077
3078 case 1:
3079 if (!lp->tx_enable) {
3080 if ((sts = ping_media(dev, 3000)) < 0) {
3081 next_tick = sts & ~TIMER_CB;
3082 } else {
3083 if (sts) {
3084 lp->local_state = 0;
3085 lp->tcount++;
3086 lp->media = INIT;
3087 } else {
3088 de4x5_init_connection(dev);
3089 }
3090 }
3091 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3092 lp->media = BNC_SUSPECT;
3093 next_tick = 3000;
3094 }
3095 break;
3096 }
3097 break;
3098
3099 case BNC_SUSPECT:
3100 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3101 break;
3102
3103 case SPD_DET:
3104 if (srom_map_media(dev) < 0) {
3105 lp->tcount++;
3106 lp->media = INIT;
3107 return next_tick;
3108 }
3109 if (lp->media == _100Mb) {
3110 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3111 lp->media = SPD_DET;
3112 return slnk & ~TIMER_CB;
3113 }
3114 } else {
3115 if (wait_for_link(dev) < 0) {
3116 lp->media = SPD_DET;
3117 return PDET_LINK_WAIT;
3118 }
3119 }
3120 if (lp->media == ANS) {
3121 if (is_spd_100(dev)) {
3122 lp->media = _100Mb;
3123 } else {
3124 lp->media = _10Mb;
3125 }
3126 next_tick = dc2114x_autoconf(dev);
3127 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3128 (((lp->media == _10Mb) || (lp->media == TP) ||
3129 (lp->media == BNC) || (lp->media == AUI)) &&
3130 is_10_up(dev))) {
3131 next_tick = dc2114x_autoconf(dev);
3132 } else {
3133 lp->tcount++;
3134 lp->media = INIT;
3135 }
3136 break;
3137
3138 case _10Mb:
3139 next_tick = 3000;
3140 if (!lp->tx_enable) {
3141 SET_10Mb;
3142 de4x5_init_connection(dev);
3143 } else {
3144 if (!lp->linkOK && (lp->autosense == AUTO)) {
3145 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3146 lp->media = INIT;
3147 lp->tcount++;
3148 next_tick = DE4X5_AUTOSENSE_MS;
3149 }
3150 }
3151 }
3152 break;
3153
3154 case _100Mb:
3155 next_tick = 3000;
3156 if (!lp->tx_enable) {
3157 SET_100Mb;
3158 de4x5_init_connection(dev);
3159 } else {
3160 if (!lp->linkOK && (lp->autosense == AUTO)) {
3161 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3162 lp->media = INIT;
3163 lp->tcount++;
3164 next_tick = DE4X5_AUTOSENSE_MS;
3165 }
3166 }
3167 }
3168 break;
3169
3170 default:
3171 lp->tcount++;
3172printk("Huh?: media:%02x\n", lp->media);
3173 lp->media = INIT;
3174 break;
3175 }
3176
3177 return next_tick;
3178}
3179
3180static int
3181srom_autoconf(struct net_device *dev)
3182{
3183 struct de4x5_private *lp = netdev_priv(dev);
3184
3185 return lp->infoleaf_fn(dev);
3186}
3187
3188
3189
3190
3191
3192
/*
** Map the SROM infoblock media code (lp->infoblock_media) onto the
** driver's internal media type (lp->media), honouring the user's
** full-duplex request in lp->params.fdx.
**
** Returns 0 on success, -1 if the requested duplex cannot be provided
** or the media code is unknown.  Side effects: sets lp->media and
** lp->fdx.  No-op (returns 0) when the media is already mapped.
*/
static int
srom_map_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    lp->fdx = false;
    if (lp->infoblock_media == lp->media)
        return 0;

    switch(lp->infoblock_media) {
    case SROM_10BASETF:
        /* Full-duplex TP only valid if the user asked for FDX. */
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        fallthrough;

    case SROM_10BASET:
        /* Half-duplex media cannot satisfy an FDX request. */
        if (lp->params.fdx && !lp->fdx) return -1;
        if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
            lp->media = _10Mb;
        } else {
            lp->media = TP;
        }
        break;

    case SROM_10BASE2:
        lp->media = BNC;
        break;

    case SROM_10BASE5:
        lp->media = AUI;
        break;

    case SROM_100BASETF:
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        fallthrough;

    case SROM_100BASET:
        if (lp->params.fdx && !lp->fdx) return -1;
        lp->media = _100Mb;
        break;

    case SROM_100BASET4:
        lp->media = _100Mb;
        break;

    case SROM_100BASEFF:
        if (!lp->params.fdx) return -1;
        lp->fdx = true;
        fallthrough;

    case SROM_100BASEF:
        if (lp->params.fdx && !lp->fdx) return -1;
        lp->media = _100Mb;
        break;

    case ANS:
        /* Auto-negotiation: duplex follows the user's preference. */
        lp->media = ANS;
        lp->fdx = lp->params.fdx;
        break;

    default:
        printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
               lp->infoblock_media);
        return -1;
    }

    return 0;
}
3262
/*
** Bring the newly-selected medium into service: report a media change
** (if any), reset the descriptor rings, re-enable interrupts and
** restart the transmit queue.  Ring/interrupt setup is done under
** lp->lock so the ISR never sees a half-initialised ring.
*/
static void
de4x5_init_connection(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_long flags = 0;

    if (lp->media != lp->c_media) {
        de4x5_dbg_media(dev);
        lp->c_media = lp->media;    /* remember what we last reported */
    }

    spin_lock_irqsave(&lp->lock, flags);
    de4x5_rst_desc_ring(dev);
    de4x5_setup_intr(dev);
    lp->tx_enable = true;
    spin_unlock_irqrestore(&lp->lock, flags);
    /* Prod the chip to rescan the (now valid) transmit ring. */
    outl(POLL_DEMAND, DE4X5_TPD);

    netif_wake_queue(dev);
}
3284
3285
3286
3287
3288
3289
/*
** Reset the attached PHY (or the internal transceiver for a bare
** DC21140).  SROM boards run the SROM-supplied reset sequence; MII
** PHYs additionally get a soft reset through the MII control register.
** Returns 0, or the test_mii_reg() polling result while the MII reset
** bit is still being waited on (may carry TIMER_CB).
*/
static int
de4x5_reset_phy(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int next_tick = 0;

    if ((lp->useSROM) || (lp->phy[lp->active].id)) {
        if (lp->timeout < 0) {
            if (lp->useSROM) {
                if (lp->phy[lp->active].rst) {
                    /* Sequence is run twice -- presumably some PHYs need
                    ** the second pass to latch the reset; TODO confirm.
                    */
                    srom_exec(dev, lp->phy[lp->active].rst);
                    srom_exec(dev, lp->phy[lp->active].rst);
                } else if (lp->rst) {
                    srom_exec(dev, lp->rst);
                    srom_exec(dev, lp->rst);
                }
            } else {
                PHY_HARD_RESET;
            }
            if (lp->useMII) {
                mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
            }
        }
        if (lp->useMII) {
            /* Poll until the self-clearing reset bit drops. */
            next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
        }
    } else if (lp->chipset == DC21140) {
        PHY_HARD_RESET;
    }

    return next_tick;
}
3323
/*
** Arm a media test and poll for the interrupt bits in 'irqs'.
** First call (lp->timeout < 0) programs the SIA (csr13..15), the
** interrupt mask and clears stale status; subsequent calls just poll.
** Returns the masked status when a requested bit is set or the budget
** expires, else 100|TIMER_CB to ask for another 100ms poll.
*/
static int
test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 sts, csr12;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;    /* budget in 100ms polling ticks */
        if (!lp->useSROM) {
            reset_init_sia(dev, csr13, csr14, csr15);
        }

        /* Mask in only the interrupts we want to see. */
        outl(irq_mask, DE4X5_IMR);

        /* Clear all pending (write-to-clear) status bits. */
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);

        /* Also clear SIA status where the chip latches it. */
        if ((lp->chipset == DC21041) || lp->useSROM) {
            csr12 = inl(DE4X5_SISR);
            outl(csr12, DE4X5_SISR);
        }
    }

    sts = inl(DE4X5_STS) & ~TIMER_CB;

    if (!(sts & irqs) && --lp->timeout) {
        sts = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;          /* re-arm for the next test */
    }

    return sts;
}
3361
/*
** Poll the SIA status for twisted-pair link failure / no-carrier bits.
** Returns 0 once neither fault bit is set, the remaining fault bits on
** timeout, or 100|TIMER_CB to be called again in 100ms.
*/
static int
test_tp(struct net_device *dev, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int sisr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;    /* budget in 100ms ticks */
    }

    sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);

    if (sisr && --lp->timeout) {
        sisr = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return sisr;
}
3383
3384
3385
3386
3387
3388
#define SAMPLE_INTERVAL 500    /* link sampling period (ms) */
#define SAMPLE_DELAY 2000      /* initial settling delay (ms) */
/*
** Look for a 100Mb/s link.  The first call may schedule a 2s settling
** delay before sampling starts.  Samples come from the PHY/SROM status
** helpers or, for bare boards, the inverted GEP link pins.  Returns the
** sampled link bits when found or the budget runs out, otherwise
** SAMPLE_INTERVAL|TIMER_CB (or SAMPLE_DELAY|TIMER_CB) to reschedule.
** 'ret' selects which sampled bits count as success per chipset.
*/
static int
test_for_100Mb(struct net_device *dev, int msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);

    if (lp->timeout < 0) {
        if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
        if (msec > SAMPLE_DELAY) {
            lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
            gep = SAMPLE_DELAY | TIMER_CB;    /* settle before sampling */
            return gep;
        } else {
            lp->timeout = msec/SAMPLE_INTERVAL;
        }
    }

    if (lp->phy[lp->active].id || lp->useSROM) {
        gep = is_100_up(dev) | is_spd_100(dev);
    } else {
        gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
    }
    if (!(gep & ret) && --lp->timeout) {
        gep = SAMPLE_INTERVAL | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return gep;
}
3421
3422static int
3423wait_for_link(struct net_device *dev)
3424{
3425 struct de4x5_private *lp = netdev_priv(dev);
3426
3427 if (lp->timeout < 0) {
3428 lp->timeout = 1;
3429 }
3430
3431 if (lp->timeout--) {
3432 return TIMER_CB;
3433 } else {
3434 lp->timeout = -1;
3435 }
3436
3437 return 0;
3438}
3439
3440
3441
3442
3443
/*
** Poll an MII register until the bits in 'mask' reach the wanted state:
** pol==true waits for the masked bits to become SET, pol==false waits
** for them to CLEAR.  Returns the masked register value on completion
** or timeout, else 100|TIMER_CB to request another 100ms poll.
*/
static int
test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int test;
    u_long iobase = dev->base_addr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;
    }

    reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
    /* XOR with ~0 inverts the sense so 'test' is nonzero while the
    ** condition is NOT yet met, for either polarity.
    */
    test = (reg ^ (pol ? ~0 : 0)) & mask;

    if (test && --lp->timeout) {
        reg = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return reg;
}
3466
/*
** Report (as a truthy bit-test result, not 0/1) whether the link is
** running at 100Mb/s.  Source of truth depends on the board:
** MII PHY speed register, inverted GEP link pin (no SROM), DC21143 SIA
** status, or the SROM-described GEP bit qualified by its polarity.
*/
static int
is_spd_100(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int spd;

    if (lp->useMII) {
        /* Compare the PHY speed register against the table's
        ** expected value under its mask.
        */
        spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
        spd = ~(spd ^ lp->phy[lp->active].spd.value);
        spd &= lp->phy[lp->active].spd.mask;
    } else if (!lp->useSROM) {
        spd = ((~gep_rd(dev)) & GEP_SLNK);    /* GEP pin is active-low */
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;

        spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
              (lp->linkOK & ~lp->asBitValid);
    }

    return spd;
}
3490
/*
** Truthy when the 100Mb/s link is up.  MII status is read twice because
** the link bit is latched -- the first read clears a stale latch, the
** second reflects current state.
*/
static int
is_100_up(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->useMII) {
        /* Dummy read to clear the latched link-status bit. */
        mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
        return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
    } else if (!lp->useSROM) {
        return (~gep_rd(dev)) & GEP_SLNK;    /* active-low GEP pin */
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;

        return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
               (lp->linkOK & ~lp->asBitValid);
    }
}
3511
/*
** Truthy when the 10Mb/s link is up.  Mirrors is_100_up() but tests the
** 10Mb/s pins/bits (GEP_LNP, SISR_LS10) and accepts any DC2114x.
*/
static int
is_10_up(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->useMII) {
        /* Dummy read to clear the latched link-status bit. */
        mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
        return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
    } else if (!lp->useSROM) {
        return (~gep_rd(dev)) & GEP_LNP;     /* active-low GEP pin */
    } else {
        if ((lp->ibn == 2) || !lp->asBitValid)
            return ((lp->chipset & ~0x00ff) == DC2114x) ?
                    (~inl(DE4X5_SISR)&SISR_LS10):
                    0;

        return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
               (lp->linkOK & ~lp->asBitValid);
    }
}
3534
/*
** Truthy when the transceiver can auto-negotiate: the MII status
** register for a real PHY, or the SIA link-partner bit for a DC2114x.
*/
static int
is_anc_capable(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
        return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
        return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
    } else {
        return 0;
    }
}
3549
3550
3551
3552
3553
/*
** Transmit a self-addressed probe frame (lp->frame) and watch its TX
** descriptor for completion.  First call queues the frame and prods the
** chip; later calls poll.  Returns 100|TIMER_CB to poll again, 0 when
** the frame was sent cleanly, 1 on no-carrier/TX-error/timeout.
*/
static int
ping_media(struct net_device *dev, int msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int sisr;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;

        lp->tmp = lp->tx_new;    /* remember which descriptor to watch */
        load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);    /* kick the transmitter */
    }

    sisr = inl(DE4X5_SISR);

    /* Status < 0 means T_OWN still set: the chip hasn't finished yet. */
    if ((!(sisr & SISR_NCR)) &&
        ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
        (--lp->timeout)) {
        sisr = 100 | TIMER_CB;
    } else {
        if ((!(sisr & SISR_NCR)) &&
            !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
            lp->timeout) {
            sisr = 0;    /* sent without error inside the budget */
        } else {
            sisr = 1;    /* no carrier, TX error or timed out */
        }
        lp->timeout = -1;
    }

    return sisr;
}
3589
3590
3591
3592
3593
3594
/*
** Hand a received buffer to the stack and re-provision ring slot
** 'index'.  Two build variants:
**  - zero-copy (default): allocate a fresh, DE4X5_ALIGN-aligned skb for
**    the ring and return the old one (with 'len' bytes committed);
**  - copy (alpha/ppc/sparc or DE4X5_DO_MEMCPY): copy 'len' bytes out of
**    the contiguous lp->rx_bufs area, handling wrap at the ring end.
** Returns NULL on allocation failure; the sentinel (sk_buff *)1 is
** never a real skb.
*/
static struct sk_buff *
de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct sk_buff *p;

#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
    struct sk_buff *ret;
    u_long i=0, tmp;

    p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
    if (!p) return NULL;

    /* Round the data pointer up to the next DE4X5_ALIGN boundary. */
    tmp = virt_to_bus(p->data);
    i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
    skb_reserve(p, i);
    lp->rx_ring[index].buf = cpu_to_le32(tmp + i);

    ret = lp->rx_skb[index];
    lp->rx_skb[index] = p;

    /* > 1 excludes both NULL and the (sk_buff *)1 sentinel. */
    if ((u_long) ret > 1) {
        skb_put(ret, len);
    }

    return ret;

#else
    if (lp->state != OPEN) return (struct sk_buff *)1;

    p = netdev_alloc_skb(dev, len + 2);
    if (!p) return NULL;

    skb_reserve(p, 2);    /* 16-byte align the IP header */
    if (index < lp->rx_old) {
        /* Packet wraps around the end of the ring buffer area. */
        short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
        skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
        skb_put_data(p, lp->rx_bufs, len - tlen);
    } else {
        skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
    }

    return p;
#endif
}
3640
3641static void
3642de4x5_free_rx_buffs(struct net_device *dev)
3643{
3644 struct de4x5_private *lp = netdev_priv(dev);
3645 int i;
3646
3647 for (i=0; i<lp->rxRingSize; i++) {
3648 if ((u_long) lp->rx_skb[i] > 1) {
3649 dev_kfree_skb(lp->rx_skb[i]);
3650 }
3651 lp->rx_ring[i].status = 0;
3652 lp->rx_skb[i] = (struct sk_buff *)1;
3653 }
3654}
3655
3656static void
3657de4x5_free_tx_buffs(struct net_device *dev)
3658{
3659 struct de4x5_private *lp = netdev_priv(dev);
3660 int i;
3661
3662 for (i=0; i<lp->txRingSize; i++) {
3663 if (lp->tx_skb[i])
3664 de4x5_free_tx_buff(lp, i);
3665 lp->tx_ring[i].status = 0;
3666 }
3667
3668
3669 __skb_queue_purge(&lp->cache.queue);
3670}
3671
3672
3673
3674
3675
3676
3677
3678
/*
** Quiesce the chip before a media change: stop DMA, reap completed
** transmits, free TX buffers, then save CSR state across a software
** reset and restart.  save_cnt makes this idempotent -- a second call
** before de4x5_rst_desc_ring() runs is a no-op.
*/
static void
de4x5_save_skbs(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 omr;

    if (!lp->cache.save_cnt) {
        STOP_DE4X5;
        de4x5_tx(dev);                 /* reap finished descriptors */
        de4x5_free_tx_buffs(dev);
        de4x5_cache_state(dev, DE4X5_SAVE_STATE);
        de4x5_sw_reset(dev);
        de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
        lp->cache.save_cnt++;
        START_DE4X5;
    }
}
3697
/*
** Counterpart of de4x5_save_skbs(): with the chip stopped, re-point the
** RX/TX ring base registers, reset the ring indices, give every RX
** descriptor back to the chip (R_OWN) and clear the TX ring, then
** restart DMA.  Only runs while a save is outstanding (save_cnt).
*/
static void
de4x5_rst_desc_ring(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i;
    s32 omr;

    if (lp->cache.save_cnt) {
        STOP_DE4X5;
        outl(lp->dma_rings, DE4X5_RRBA);
        /* TX ring follows the RX descriptors in the same DMA block. */
        outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
             DE4X5_TRBA);

        lp->rx_new = lp->rx_old = 0;
        lp->tx_new = lp->tx_old = 0;

        for (i = 0; i < lp->rxRingSize; i++) {
            lp->rx_ring[i].status = cpu_to_le32(R_OWN);
        }

        for (i = 0; i < lp->txRingSize; i++) {
            lp->tx_ring[i].status = cpu_to_le32(0);
        }

        /* Make sure descriptor writes are visible before restarting. */
        barrier();
        lp->cache.save_cnt--;
        START_DE4X5;
    }
}
3728
/*
** Save or restore the chip's operating CSRs around a software reset.
** SAVE captures bus mode (CSR0), operating mode minus start bits
** (CSR6) and the interrupt mask (CSR7); RESTORE writes them back and
** reprograms either the GEP (DC21140) or the SIA (other chips).
*/
static void
de4x5_cache_state(struct net_device *dev, int flag)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    switch(flag) {
    case DE4X5_SAVE_STATE:
        lp->cache.csr0 = inl(DE4X5_BMR);
        /* Strip TX/RX start bits so restore doesn't auto-start DMA. */
        lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
        lp->cache.csr7 = inl(DE4X5_IMR);
        break;

    case DE4X5_RESTORE_STATE:
        outl(lp->cache.csr0, DE4X5_BMR);
        outl(lp->cache.csr6, DE4X5_OMR);
        outl(lp->cache.csr7, DE4X5_IMR);
        if (lp->chipset == DC21140) {
            gep_wr(lp->cache.gepc, dev);
            gep_wr(lp->cache.gep, dev);
        } else {
            reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
                           lp->cache.csr15);
        }
        break;
    }
}
3756
3757static void
3758de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3759{
3760 struct de4x5_private *lp = netdev_priv(dev);
3761
3762 __skb_queue_tail(&lp->cache.queue, skb);
3763}
3764
3765static void
3766de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3767{
3768 struct de4x5_private *lp = netdev_priv(dev);
3769
3770 __skb_queue_head(&lp->cache.queue, skb);
3771}
3772
3773static struct sk_buff *
3774de4x5_get_cache(struct net_device *dev)
3775{
3776 struct de4x5_private *lp = netdev_priv(dev);
3777
3778 return __skb_dequeue(&lp->cache.queue);
3779}
3780
3781
3782
3783
3784
/*
** Poll for auto-negotiation completion: waits until either one of the
** interrupt bits in 'irqs' fires or the SIA ANS field reports NWOK.
** First call programs the interrupt mask and clears stale status.
** Returns the masked status, or 100|TIMER_CB to poll again in 100ms.
*/
static int
test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 sts, ans;

    if (lp->timeout < 0) {
        lp->timeout = msec/100;    /* budget in 100ms ticks */
        outl(irq_mask, DE4X5_IMR);

        /* Clear pending (write-to-clear) status bits. */
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);
    }

    ans = inl(DE4X5_SISR) & SISR_ANS;
    sts = inl(DE4X5_STS) & ~TIMER_CB;

    /* (ans ^ ANS_NWOK) is zero exactly when negotiation finished OK. */
    if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
        sts = 100 | TIMER_CB;
    } else {
        lp->timeout = -1;
    }

    return sts;
}
3812
/*
** Re-enable interrupt delivery, but only once the receiver is running
** (OMR_SR set): unmask everything, acknowledge stale status, then
** enable.  The imr/sts locals are used by the UNMASK/ENABLE macros.
*/
static void
de4x5_setup_intr(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 imr, sts;

    if (inl(DE4X5_OMR) & OMR_SR) {
        imr = 0;
        UNMASK_IRQs;
        sts = inl(DE4X5_STS);
        outl(sts, DE4X5_STS);    /* write-to-clear pending status */
        ENABLE_IRQs;
    }
}
3828
3829
3830
3831
/*
** Reset the SIA and program its connectivity/TX-RX/general registers
** (CSR13/14/15).  SROM boards override the caller's values with the
** cached SROM-derived ones; infoblock type 3 boards instead run the
** SROM reset/GEP sequences and enable the SIA directly.
*/
static void
reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    RESET_SIA;
    if (lp->useSROM) {
        if (lp->ibn == 3) {
            srom_exec(dev, lp->phy[lp->active].rst);
            srom_exec(dev, lp->phy[lp->active].gep);
            outl(1, DE4X5_SICR);
            return;
        } else {
            /* Use the values decoded from the SROM, not the caller's. */
            csr15 = lp->cache.csr15;
            csr14 = lp->cache.csr14;
            csr13 = lp->cache.csr13;
            outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
            outl(csr15 | lp->cache.gep, DE4X5_SIGR);
        }
    } else {
        outl(csr15, DE4X5_SIGR);
    }
    outl(csr14, DE4X5_STRR);
    outl(csr13, DE4X5_SICR);

    mdelay(10);    /* let the SIA settle */
}
3860
3861
3862
3863
3864static void
3865create_packet(struct net_device *dev, char *frame, int len)
3866{
3867 int i;
3868 char *buf = frame;
3869
3870 for (i=0; i<ETH_ALEN; i++) {
3871 *buf++ = dev->dev_addr[i];
3872 }
3873 for (i=0; i<ETH_ALEN; i++) {
3874 *buf++ = dev->dev_addr[i];
3875 }
3876
3877 *buf++ = 0;
3878 *buf++ = 1;
3879}
3880
3881
3882
3883
3884static int
3885EISA_signature(char *name, struct device *device)
3886{
3887 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3888 struct eisa_device *edev;
3889
3890 *name = '\0';
3891 edev = to_eisa_device (device);
3892 i = edev->id.driver_data;
3893
3894 if (i >= 0 && i < siglen) {
3895 strcpy (name, de4x5_signatures[i]);
3896 status = 1;
3897 }
3898
3899 return status;
3900}
3901
3902
3903
3904
/*
** Derive the adapter name for a PCI board.  DC21040 boards are always
** "DE434/5"; otherwise the name is pulled out of the SROM (offset 26
** plus 3 bytes per adapter entry, count at offset 19) and matched
** against the known signature table.  Unrecognised boards fall back to
** the chipset name (or are blanked when dec_only is set).  Also decides
** whether SROM-based media decoding should be used (lp->useSROM).
*/
static void
PCI_signature(char *name, struct de4x5_private *lp)
{
    int i, siglen = ARRAY_SIZE(de4x5_signatures);

    if (lp->chipset == DC21040) {
        strcpy(name, "DE434/5");
        return;
    } else {
        /* Skip past the per-adapter info entries to the name field. */
        int tmp = *((char *)&lp->srom + 19) * 3;
        strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
    }
    name[8] = '\0';    /* strncpy may not have terminated */
    for (i=0; i<siglen; i++) {
        if (strstr(name,de4x5_signatures[i])!=NULL) break;
    }
    if (i == siglen) {
        if (dec_only) {
            *name = '\0';    /* refuse non-DEC boards */
        } else {
            /* NOTE(review): the DC21040 arm of this ternary chain is
            ** unreachable -- that chipset returned above.
            */
            strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
                           ((lp->chipset == DC21041) ? "DC21041" :
                            ((lp->chipset == DC21140) ? "DC21140" :
                             ((lp->chipset == DC21142) ? "DC21142" :
                              ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
                               )))))));
        }
        if (lp->chipset != DC21041) {
            lp->useSROM = true;
        }
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
        lp->useSROM = true;
    }
}
3939
3940
3941
3942
3943
3944
3945
3946
3947
/*
** Prime the address-ROM/SROM for reading and, for post-DC21040 chips,
** read the whole SROM into lp->srom.  The hardware address words are
** summed first: a sum of 0 or 3*0xffff means an empty/blank SROM and
** the rest of the read is skipped.
*/
static void
DevicePresent(struct net_device *dev, u_long aprom_addr)
{
    int i, j=0;
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->chipset == DC21040) {
        if (lp->bus == EISA) {
            enet_addr_rst(aprom_addr);    /* sync the serial ROM pointer */
        } else {
            outl(0, aprom_addr);          /* reset the PCI address ROM */
        }
    } else {
        u_short tmp;
        __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
        for (i=0; i<(ETH_ALEN>>1); i++) {
            tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
            j += tmp;                     /* accumulate blank-SROM check */
            *p = cpu_to_le16(tmp);
        }
        if (j == 0 || j == 3 * 0xffff) {
            /* All-zero or all-ones address: no usable SROM. */
            return;
        }

        p = (__le16 *)&lp->srom;
        for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
            tmp = srom_rd(aprom_addr, i);
            *p++ = cpu_to_le16(tmp);
        }
        de4x5_dbg_srom(&lp->srom);
    }
}
3981
3982
3983
3984
3985
3986
/*
** Synchronise the EISA address-ROM read pointer by scanning the port
** for the doubled ETH_PROM_SIG signature; after a full match the ROM's
** internal pointer sits at the start of the hardware address.  The scan
** gives up after PROBE_LENGTH+sigLength-1 reads.
*/
static void
enet_addr_rst(u_long aprom_addr)
{
    union {
        struct {
            u32 a;
            u32 b;
        } llsig;
        char Sig[sizeof(u32) << 1];
    } dev;
    short sigLength=0;
    s8 data;
    int i, j;

    dev.llsig.a = ETH_PROM_SIG;
    dev.llsig.b = ETH_PROM_SIG;
    sigLength = sizeof(u32) << 1;

    for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
        data = inb(aprom_addr);
        if (dev.Sig[j] == data) {
            j++;                     /* matched next signature byte */
        } else {
            if (data == dev.Sig[0]) {
                j=1;                 /* restart match at this byte */
            } else {
                j=0;
            }
        }
    }
}
4018
4019
4020
4021
4022
4023
4024
/*
** Extract the Ethernet hardware address from the address ROM / SROM
** (or, for known-broken SROMs, from the raw SROM bytes) and verify it
** against the ROM checksum where one exists.  The checksum 'k' is the
** historical DEC rotate-and-add over the three address words.
** Returns 0 on success, -1 when a checksum/sanity check fails under
** dec_only, after giving test_bad_enet() a chance to repair it.
*/
static int
get_hw_addr(struct net_device *dev)
{
    u_long iobase = dev->base_addr;
    int broken, i, k, tmp, status = 0;
    u_short j,chksum;
    struct de4x5_private *lp = netdev_priv(dev);

    broken = de4x5_bad_srom(lp);    /* SMC/ACCTON SROM quirks? */

    /* Three iterations, one 16-bit address word each. */
    for (i=0,k=0,j=0;j<3;j++) {
        k <<= 1;
        if (k > 0xffff) k-=0xffff;    /* end-around carry */

        if (lp->bus == PCI) {
            if (lp->chipset == DC21040) {
                /* DC21040 serial ROM: negative means not ready yet. */
                while ((tmp = inl(DE4X5_APROM)) < 0);
                k += (u_char) tmp;
                dev->dev_addr[i++] = (u_char) tmp;
                while ((tmp = inl(DE4X5_APROM)) < 0);
                k += (u_short) (tmp << 8);
                dev->dev_addr[i++] = (u_char) tmp;
            } else if (!broken) {
                dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
                dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
            } else if ((broken == SMC) || (broken == ACCTON)) {
                /* Broken SROMs keep the address at offset 0. */
                dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
                dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
            }
        } else {
            k += (u_char) (tmp = inb(EISA_APROM));
            dev->dev_addr[i++] = (u_char) tmp;
            k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
            dev->dev_addr[i++] = (u_char) tmp;
        }

        if (k > 0xffff) k-=0xffff;
    }
    if (k == 0xffff) k=0;    /* checksum is computed modulo 0xffff */

    if (lp->bus == PCI) {
        if (lp->chipset == DC21040) {
            while ((tmp = inl(DE4X5_APROM)) < 0);
            chksum = (u_char) tmp;
            while ((tmp = inl(DE4X5_APROM)) < 0);
            chksum |= (u_short) (tmp << 8);
            if ((k != chksum) && (dec_only)) status = -1;
        }
    } else {
        chksum = (u_char) inb(EISA_APROM);
        chksum |= (u_short) (inb(EISA_APROM) << 8);
        if ((k != chksum) && (dec_only)) status = -1;
    }

    /* Rebuild a usable SROM image for known-broken vendors. */
    srom_repair(dev, broken);

#ifdef CONFIG_PPC_PMAC
    /* Some PowerMac DE434/5 clones store the address bit-reversed;
    ** detect the 00:a0 prefix and reverse each byte.
    */
    if ( machine_is(powermac) &&
         (dev->dev_addr[0] == 0) &&
         (dev->dev_addr[1] == 0xa0) )
    {
        for (i = 0; i < ETH_ALEN; ++i)
        {
            int x = dev->dev_addr[i];
            x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
            x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
            dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
        }
    }
#endif

    /* Last chance to patch an all-zero / bogus address. */
    status = test_bad_enet(dev, status);

    return status;
}
4106
4107
4108
4109
4110static int
4111de4x5_bad_srom(struct de4x5_private *lp)
4112{
4113 int i, status = 0;
4114
4115 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4116 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4117 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4118 if (i == 0) {
4119 status = SMC;
4120 } else if (i == 1) {
4121 status = ACCTON;
4122 }
4123 break;
4124 }
4125 }
4126
4127 return status;
4128}
4129
/*
** Rebuild a minimal, usable SROM image for boards whose vendor wrote a
** broken one: zero the image, plant the already-extracted hardware
** address, and copy in a canned info section (srom_repair_info is
** indexed by card code minus one).  Only SMC is handled here.
*/
static void
srom_repair(struct net_device *dev, int card)
{
    struct de4x5_private *lp = netdev_priv(dev);

    switch(card) {
    case SMC:
        memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
        memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
        memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
        lp->useSROM = true;
        break;
    }
}
4144
4145
4146
4147
4148
/*
** Sanity-check the extracted hardware address.  An all-zero address
** (sum 0) or the known-bad 0x5fa pattern is repaired by cloning the
** previous board's address (module-level 'last') and bumping the low
** bytes -- multi-port boards often share one ROM.  A good address is
** recorded in 'last' for the next port.  Returns the possibly-cleared
** status.
*/
static int
test_bad_enet(struct net_device *dev, int status)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i, tmp;

    for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
    if ((tmp == 0) || (tmp == 0x5fa)) {
        if ((lp->chipset == last.chipset) &&
            (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
            for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
            /* Increment the address, propagating carries upward. */
            for (i=ETH_ALEN-1; i>2; --i) {
                dev->dev_addr[i] += 1;
                if (dev->dev_addr[i] != 0) break;
            }
            for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
            if (!an_exception(lp)) {
                dev->irq = last.irq;    /* share the IRQ too */
            }

            status = 0;    /* repaired: clear any checksum failure */
        }
    } else if (!status) {
        last.chipset = lp->chipset;
        last.bus = lp->bus_num;
        last.irq = dev->irq;
        for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
    }

    return status;
}
4180
4181
4182
4183
4184static int
4185an_exception(struct de4x5_private *lp)
4186{
4187 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4188 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4189 return -1;
4190 }
4191
4192 return 0;
4193}
4194
4195
4196
4197
/*
** Read one 16-bit word at 'offset' from the serial SROM: select the
** device, clock out the read command and address, then clock in the
** data word.
*/
static short
srom_rd(u_long addr, u_char offset)
{
    sendto_srom(SROM_RD | SROM_SR, addr);    /* select serial-ROM mode */

    srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
    srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
    srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);

    return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
4209
/*
** Clock one bit into the SROM: present the data, raise the clock,
** drop it again.
*/
static void
srom_latch(u_int command, u_long addr)
{
    sendto_srom(command, addr);
    sendto_srom(command | DT_CLK, addr);
    sendto_srom(command, addr);
}
4217
/*
** Clock the (already-encoded) command bits into the SROM, then return
** the data line to idle while keeping chip select asserted.
*/
static void
srom_command(u_int command, u_long addr)
{
    srom_latch(command, addr);
    srom_latch(command, addr);
    srom_latch((command & 0x0000ff00) | DT_CS, addr);
}
4225
/*
** Clock the 6-bit word address into the SROM, MSB first (the offset is
** pre-shifted by 2 so bit 7 of 'a' is the current address bit).
*/
static void
srom_address(u_int command, u_long addr, u_char offset)
{
    int i, a;

    a = offset << 2;
    for (i=0; i<6; i++, a <<= 1) {
        srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
    }
    udelay(1);

    /* Dummy read of the data-out bit; presumably this terminates the
    ** address cycle.  The value is intentionally discarded.
    */
    i = (getfrom_srom(addr) >> 3) & 0x01;
}
4239
/*
** Clock a 16-bit data word out of the SROM, MSB first, then deselect
** the device.  Bit 3 of the port is the SROM data-out line.
*/
static short
srom_data(u_int command, u_long addr)
{
    int i;
    short word = 0;
    s32 tmp;

    for (i=0; i<16; i++) {
        sendto_srom(command | DT_CLK, addr);
        tmp = getfrom_srom(addr);
        sendto_srom(command, addr);

        word = (word << 1) | ((tmp >> 3) & 0x01);
    }

    sendto_srom(command & 0x0000ff00, addr);    /* drop chip select */

    return word;
}
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
/*
** Write a bit pattern to the SROM control port and allow 1us for the
** lines to settle.
*/
static void
sendto_srom(u_int command, u_long addr)
{
    outl(command, addr);
    udelay(1);
}
4280
/*
** Read the SROM control port, with a 1us settle after the access.
*/
static int
getfrom_srom(u_long addr)
{
    s32 tmp;

    tmp = inl(addr);
    udelay(1);

    return tmp;
}
4291
/*
** Locate this adapter's SROM infoleaf: pick the chipset-specific
** handler from infoleaf_array[], then (for multi-adapter SROMs) find
** the entry matching lp->device among the 3-byte records starting at
** offset 26 (count at offset 19) and record its leaf offset.
** Returns 0 on success, -ENXIO (and disables SROM use) on failure.
*/
static int
srom_infoleaf_info(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i, count;
    u_char *p;

    /* Find the handler for this chipset. */
    for (i=0; i<INFOLEAF_SIZE; i++) {
        if (lp->chipset == infoleaf_array[i].chipset) break;
    }
    if (i == INFOLEAF_SIZE) {
        lp->useSROM = false;
        printk("%s: Cannot find correct chipset for SROM decoding!\n",
               dev->name);
        return -ENXIO;
    }

    lp->infoleaf_fn = infoleaf_array[i].fn;

    /* Walk the per-adapter records: [device byte][leaf offset LE16]. */
    count = *((u_char *)&lp->srom + 19);
    p = (u_char *)&lp->srom + 26;

    if (count > 1) {
        for (i=count; i; --i, p+=3) {
            if (lp->device == *p) break;
        }
        if (i == 0) {
            lp->useSROM = false;
            printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
                   dev->name, lp->device);
            return -ENXIO;
        }
    }

    lp->infoleaf_offset = get_unaligned_le16(p + 1);

    return 0;
}
4332
4333
4334
4335
4336
4337
4338
4339
/*
** One-time walk over the SROM infoblocks at lp->infoleaf_offset:
** programs the DC21140 GEP control byte and runs the init pass
** (second argument 1) of the type 1/3/5 infoblock handlers.  Blocks of
** other types are only skipped over.  Compact entries (<128) have a
** fixed length; extended blocks carry their length in the low bits.
*/
static void
srom_init(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    u_char count;

    p+=2;    /* skip the 16-bit media/default word */
    if (lp->chipset == DC21140) {
        lp->cache.gepc = (*p++ | GEP_CTRL);
        gep_wr(lp->cache.gepc, dev);
    }

    /* Number of infoblocks that follow. */
    count = *p++;

    for (;count; --count) {
        if (*p < 128) {
            p += COMPACT_LEN;    /* compact-format block */
        } else if (*(p+1) == 5) {
            type5_infoblock(dev, 1, p);
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 4) {
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 3) {
            type3_infoblock(dev, 1, p);
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 2) {
            p += ((*p & BLOCK_LEN) + 1);
        } else if (*(p+1) == 1) {
            type1_infoblock(dev, 1, p);
            p += ((*p & BLOCK_LEN) + 1);
        } else {
            p += ((*p & BLOCK_LEN) + 1);    /* unknown: skip */
        }
    }
}
4378
4379
4380
4381
4382
/*
** Execute an SROM-supplied GEP/reset sequence: 'p' points at a count
** byte followed by that many values (bytes for DC21140 infoblock
** types other than 5, little-endian 16-bit words otherwise), each
** written to the GEP with a 2ms pause.  Non-DC21140 chips get their
** SIA reset first and restored afterwards.  Only infoblock types
** 1/3/5 carry such sequences; anything else is a no-op.
*/
static void
srom_exec(struct net_device *dev, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    u_char count = (p ? *p++ : 0);
    u_short *w = (u_short *)p;

    if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;

    if (lp->chipset != DC21140) RESET_SIA;

    while (count--) {
        gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
                *p++ : get_unaligned_le16(w++)), dev);
        mdelay(2);    /* pacing required between GEP writes */
    }

    if (lp->chipset != DC21140) {
        /* Restore the SIA programming we clobbered above. */
        outl(lp->cache.csr14, DE4X5_STRR);
        outl(lp->cache.csr13, DE4X5_SICR);
    }
}
4406
4407
4408
4409
4410
4411
/*
** DC21041 infoleaf handler: media selection is done elsewhere for this
** chip, so just return the standard autosense interval (ms).
*/
static int
dc21041_infoleaf(struct net_device *dev)
{
    return DE4X5_AUTOSENSE_MS;
}
4417
/*
** DC21140 infoleaf handler: dispatch the first SROM infoblock through
** dc_infoblock[] and return its next timer interval (TIMER_CB
** stripped).  When every block has been tried (lp->tcount == count)
** the media is declared NC (no connection) and the state machine is
** reset to INIT.
** NOTE(review): near-identical to dc21142_infoleaf()/dc21143_infoleaf()
** apart from the GEP control byte handling -- keep the three in sync.
*/
static int
dc21140_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the 16-bit media/default word. */
    p+=2;

    /* GEP control byte (DC21140 only). */
    lp->cache.gepc = (*p++ | GEP_CTRL);

    /* Number of infoblocks available. */
    count = *p++;

    /* Compact (<128) blocks use the COMPACT handler. */
    if (*p < 128) {
        next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
        next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }

    if (lp->tcount == count) {
        lp->media = NC;
        if (lp->media != lp->c_media) {
            de4x5_dbg_media(dev);
            lp->c_media = lp->media;
        }
        lp->media = INIT;
        lp->tcount = 0;
        lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4455
/*
** Parse the DC21142 SROM info leaf.  Same structure as the DC21140
** variant except there is no leading GEP control byte in the leaf.
** Returns the next autosense tick interval (TIMER_CB flag stripped).
*/
static int
dc21142_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the 2 byte leaf header */
    p+=2;

    /* Block count for this leaf */
    count = *p++;

    /* Compact format if first byte < 128, else dispatch on type byte */
    if (*p < 128) {
	next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
	next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }

    /* All media blocks exhausted: flag NC and reset to INIT */
    if (lp->tcount == count) {
	lp->media = NC;
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tcount = 0;
	lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4490
/*
** Parse the DC21143 SROM info leaf; identical in structure to the
** DC21142 variant.  Returns the next autosense tick interval with the
** TIMER_CB flag stripped.
*/
static int
dc21143_infoleaf(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char count = 0;
    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
    int next_tick = DE4X5_AUTOSENSE_MS;

    /* Skip the 2 byte leaf header */
    p+=2;

    /* Block count for this leaf */
    count = *p++;

    /* Compact format if first byte < 128, else dispatch on type byte */
    if (*p < 128) {
	next_tick = dc_infoblock[COMPACT](dev, count, p);
    } else {
	next_tick = dc_infoblock[*(p+1)](dev, count, p);
    }
    /* All media blocks exhausted: flag NC and reset to INIT */
    if (lp->tcount == count) {
	lp->media = NC;
	if (lp->media != lp->c_media) {
	    de4x5_dbg_media(dev);
	    lp->c_media = lp->media;
	}
	lp->media = INIT;
	lp->tcount = 0;
	lp->tx_enable = false;
    }

    return next_tick & ~TIMER_CB;
}
4524
4525
4526
4527
4528
/*
** Handle a compact-format SROM info block.  If this isn't the block the
** autosense try counter (lp->tcount) selects, recurse to the next block
** in the leaf.  Otherwise, when the media state machine is at INIT with
** no timeout pending, load the media code, GEP value, CSR6 bits and
** autosense flags from the block, then hand off to the DC21140
** autoconfiguration routine.
*/
static int
compact_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6;

    /* Not our block yet: step to the next one (compact blocks have a
    ** fixed length; extended blocks dispatch on their type byte). */
    if (--count > lp->tcount) {
	if (*(p+COMPACT_LEN) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
	} else {
	    return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
	}
    }

    if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = COMPACT;
	lp->active = 0;
	gep_wr(lp->cache.gepc, dev);
	lp->infoblock_media = (*p++) & COMPACT_MC;   /* Media code */
	lp->cache.gep = *p++;                        /* GEP data */
	csr6 = *p++;                                 /* Command bits */
	flags = *p++;                                /* Autosense flags */

	lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity indicator valid? */
	lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
	lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* GEP sense bit position */
	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
	lp->useMII = false;

	de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4565
4566
4567
4568
/*
** Handle a type 0 (extended, non-MII) SROM info block.  Structure and
** behaviour mirror compact_infoblock(), but the fields start after a
** 2 byte block header and the media code uses the BLOCK0_MC mask.
*/
static int
type0_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6, len = (*p & BLOCK_LEN)+1;    /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = 0;
	lp->active = 0;
	gep_wr(lp->cache.gepc, dev);
	p+=2;                                        /* Skip block header */
	lp->infoblock_media = (*p++) & BLOCK0_MC;    /* Media code */
	lp->cache.gep = *p++;                        /* GEP data */
	csr6 = *p++;                                 /* Command bits */
	flags = *p++;                                /* Autosense flags */

	lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity indicator valid? */
	lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
	lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* GEP sense bit position */
	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
	lp->useMII = false;

	de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4606
4607
4608
/*
** Handle a type 1 (DC21140 MII PHY) SROM info block.  Once the driver
** state is INITIALISED the block's PHY parameters (GEP/reset sequences
** and the 16-bit media capability, advertisement, full-duplex and TTM
** words) are captured into lp->phy[].  Before that, during autosense
** INIT, it selects MII mode and kicks the autoconfiguration routine.
*/
static int
type1_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    p += 2;                                          /* Skip block header */
    if (lp->state == INITIALISED) {
	lp->ibn = 1;
	lp->active = *p++;                           /* PHY number */
	/* Counted GEP/reset sequences: a zero count means 'none' (NULL) */
	lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
	lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].ttm = get_unaligned_le16(p);
	return 0;
    } else if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = 1;
	lp->active = *p;
	lp->infoblock_csr6 = OMR_MII_100;
	lp->useMII = true;
	lp->infoblock_media = ANS;                   /* Nway autonegotiation */

	de4x5_switch_mac_port(dev);
    }

    return dc21140m_autoconf(dev);
}
4647
/*
** Handle a type 2 (DC2114x SIA) SROM info block.  Loads the SIA CSR13/
** 14/15 values (either explicit from an extended field or chip
** defaults) plus the GEP control/data words, then hands off to the
** DC2114x autoconfiguration routine.
*/
static int
type2_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = 2;
	lp->active = 0;
	p += 2;                                      /* Skip block header */
	lp->infoblock_media = (*p) & MEDIA_CODE;

	/* Extended field present: explicit SIA register values follow */
	if ((*p++) & EXT_FIELD) {
	    lp->cache.csr13 = get_unaligned_le16(p); p += 2;
	    lp->cache.csr14 = get_unaligned_le16(p); p += 2;
	    lp->cache.csr15 = get_unaligned_le16(p); p += 2;
	} else {
	    lp->cache.csr13 = CSR13;                 /* Chip defaults */
	    lp->cache.csr14 = CSR14;
	    lp->cache.csr15 = CSR15;
	}
	/* GEP control/data live in the top 16 bits of the cached words */
	lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
	lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16);
	lp->infoblock_csr6 = OMR_SIA;
	lp->useMII = false;

	de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4688
/*
** Handle a type 3 (DC2114x MII PHY) SROM info block.  Like type 1 but
** the GEP/reset sequences are counted in 16-bit words (hence the 2x
** stride) and an extra media-capabilities interrupt byte follows.
** MOTO_SROM_BUG works around boards whose SROM reports a bogus active
** PHY number.
*/
static int
type3_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    p += 2;                                          /* Skip block header */
    if (lp->state == INITIALISED) {
	lp->ibn = 3;
	lp->active = *p++;                           /* PHY number */
	if (MOTO_SROM_BUG) lp->active = 0;
	/* Counted 16-bit GEP/reset sequences; zero count means 'none' */
	lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
	lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
	lp->phy[lp->active].mci = *p;
	return 0;
    } else if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = 3;
	lp->active = *p;
	if (MOTO_SROM_BUG) lp->active = 0;
	lp->infoblock_csr6 = OMR_MII_100;
	lp->useMII = true;
	lp->infoblock_media = ANS;                   /* Nway autonegotiation */

	de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4730
/*
** Handle a type 4 (DC2114x SYM media) SROM info block.  Uses default
** SIA CSR values, reads GEP control/data words plus CSR6 command bits
** and autosense flags, then hands off to the DC2114x autoconfiguration
** routine.
*/
static int
type4_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char flags, csr6, len = (*p & BLOCK_LEN)+1;    /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    if ((lp->media == INIT) && (lp->timeout < 0)) {
	lp->ibn = 4;
	lp->active = 0;
	p+=2;                                        /* Skip block header */
	lp->infoblock_media = (*p++) & MEDIA_CODE;
	lp->cache.csr13 = CSR13;                     /* Chip defaults */
	lp->cache.csr14 = CSR14;
	lp->cache.csr15 = CSR15;
	/* GEP control/data live in the top 16 bits of the cached words */
	lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
	lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
	csr6 = *p++;                                 /* Command bits */
	flags = *p++;                                /* Autosense flags */

	lp->asBitValid = (flags & 0x80) ? 0 : -1;    /* Activity indicator valid? */
	lp->defMedium = (flags & 0x40) ? -1 : 0;     /* Default medium flag */
	lp->asBit = 1 << ((csr6 >> 1) & 0x07);       /* GEP sense bit position */
	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
	lp->useMII = false;

	de4x5_switch_mac_port(dev);
    }

    return dc2114x_autoconf(dev);
}
4771
4772
4773
4774
4775
/*
** Handle a type 5 (reset sequence) SROM info block.  The block carries
** a counted sequence of GEP writes which is executed via srom_exec()
** both at initialisation time and during autosense INIT.  Always
** returns the default autosense interval.
*/
static int
type5_infoblock(struct net_device *dev, u_char count, u_char *p)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_char len = (*p & BLOCK_LEN)+1;                 /* This block's length */

    /* Not our block yet: step to the next one */
    if (--count > lp->tcount) {
	if (*(p+len) < 128) {
	    return dc_infoblock[COMPACT](dev, count, p+len);
	} else {
	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
	}
    }

    /* Must be initialising or autosensing to run the reset sequence */
    if ((lp->state == INITIALISED) || (lp->media == INIT)) {
	p+=2;                                        /* Skip block header */
	lp->rst = p;                                 /* Remember the sequence */
	srom_exec(dev, lp->rst);
    }

    return DE4X5_AUTOSENSE_MS;
}
4800
4801
4802
4803
4804
/*
** Read a 16-bit value from MII register 'phyreg' of the PHY at
** 'phyaddr' by bit-banging the MII management frame: preamble,
** start/read opcode, PHY and register addresses, turnaround, then the
** 16 data bits.
*/
static int
mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
{
    mii_wdata(MII_PREAMBLE,  2, ioaddr);             /* Start of 34 bit preamble...    */
    mii_wdata(MII_PREAMBLE, 32, ioaddr);             /* ...end of 34 bit preamble.     */
    mii_wdata(MII_STRD, 4, ioaddr);                  /* SFD and READ opcode            */
    mii_address(phyaddr, ioaddr);                    /* PHY address to be accessed     */
    mii_address(phyreg, ioaddr);                     /* PHY register to read           */
    mii_ta(MII_STRD, ioaddr);                        /* Turnaround time                */

    return mii_rdata(ioaddr);                        /* Read data                      */
}
4817
/*
** Write the 16-bit 'data' to MII register 'phyreg' of the PHY at
** 'phyaddr'.  The data is bit-reversed first because mii_wdata() clocks
** bits out LSB first while the wire expects MSB first.
*/
static void
mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
{
    mii_wdata(MII_PREAMBLE,  2, ioaddr);             /* Start of 34 bit preamble...    */
    mii_wdata(MII_PREAMBLE, 32, ioaddr);             /* ...end of 34 bit preamble.     */
    mii_wdata(MII_STWR, 4, ioaddr);                  /* SFD and WRITE opcode           */
    mii_address(phyaddr, ioaddr);                    /* PHY address to be accessed     */
    mii_address(phyreg, ioaddr);                     /* PHY register to write          */
    mii_ta(MII_STWR, ioaddr);                        /* Turnaround time                */
    data = mii_swap(data, 16);                       /* Swap to clock out MSB first    */
    mii_wdata(data, 16, ioaddr);                     /* Write data                     */
}
4830
4831static int
4832mii_rdata(u_long ioaddr)
4833{
4834 int i;
4835 s32 tmp = 0;
4836
4837 for (i=0; i<16; i++) {
4838 tmp <<= 1;
4839 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4840 }
4841
4842 return tmp;
4843}
4844
4845static void
4846mii_wdata(int data, int len, u_long ioaddr)
4847{
4848 int i;
4849
4850 for (i=0; i<len; i++) {
4851 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4852 data >>= 1;
4853 }
4854}
4855
4856static void
4857mii_address(u_char addr, u_long ioaddr)
4858{
4859 int i;
4860
4861 addr = mii_swap(addr, 5);
4862 for (i=0; i<5; i++) {
4863 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4864 addr >>= 1;
4865 }
4866}
4867
/*
** Drive the MII turnaround cycle: for a write (MII_STWR) the host
** drives the '10' pattern; for a read the PHY drives the bus, so just
** clock once and discard the bit.
*/
static void
mii_ta(u_long rw, u_long ioaddr)
{
    if (rw == MII_STWR) {
	sendto_mii(MII_MWR | MII_WR, 1, ioaddr);     /* Drive '1'... */
	sendto_mii(MII_MWR | MII_WR, 0, ioaddr);     /* ...then '0'  */
    } else {
	getfrom_mii(MII_MRD | MII_RD, ioaddr);       /* Tri-state turnaround clock */
    }
}
4878
/*
** Reverse the order of the low 'len' bits of 'data' and return the
** result (e.g. used to convert between LSB-first shifting and the
** MSB-first MII wire order).
*/
static int
mii_swap(int data, int len)
{
    int reversed = 0;

    while (len-- > 0) {
	reversed = (reversed << 1) | (data & 1);
	data >>= 1;
    }

    return reversed;
}
4892
/*
** Write one bit to the MII management interface: place the data bit
** (bit 17 of the command word) with the clock low, then raise MDC,
** holding each phase for 1us.
*/
static void
sendto_mii(u32 command, int data, u_long ioaddr)
{
    u32 j;

    j = (data & 1) << 17;                            /* MDO position in the CSR */
    outl(command | j, ioaddr);                       /* Data out, clock low */
    udelay(1);
    outl(command | MII_MDC | j, ioaddr);             /* Latch on rising MDC edge */
    udelay(1);
}
4904
/*
** Read one bit from the MII management interface: toggle MDC low then
** high with 1us holds and sample MDI (bit 19 of the CSR).
*/
static int
getfrom_mii(u32 command, u_long ioaddr)
{
    outl(command, ioaddr);                           /* Clock low */
    udelay(1);
    outl(command | MII_MDC, ioaddr);                 /* Clock high */
    udelay(1);

    return (inl(ioaddr) >> 19) & 1;                  /* Sample MDI */
}
4915
4916
4917
4918
/*
** Fetch an identifier for the PHY at 'phyaddr'.  Both MII ID registers
** are read, but only the first word (MII_ID0) is returned and used for
** matching against the phy_info table; MII_ID1 is read and discarded
** (NOTE(review): presumably kept for the register access sequence or as
** a leftover of fuller OUI decoding -- confirm before removing).
*/
static int
mii_get_oui(u_char phyaddr, u_long ioaddr)
{
    int r2, r3;

    /* Read r2 and r3 */
    r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
    r3 = mii_rd(MII_ID1, phyaddr, ioaddr);

    return r2;
}
4962
4963
4964
4965
/*
** Probe all MII addresses for PHY devices.  Known PHYs (matched by ID
** against the phy_info table) are copied into lp->phy[]; an unknown but
** responding PHY gets a generic control entry and a diagnostic dump is
** printed.  Every discovered PHY is then reset and its registers are
** dumped when MII debugging is enabled.  Returns the number of PHYs
** found; clears lp->useMII if none respond.
*/
static int
mii_get_phy(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    int i, j, k, n, limit=ARRAY_SIZE(phy_info);
    int id;

    lp->active = 0;
    lp->useMII = true;

    /* Search the MII address space, starting at address 1 and wrapping;
    ** address 0 is visited once (n counts the wrap) before stopping back
    ** at address 1. */
    for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
	lp->phy[lp->active].addr = i;
	if (i==0) n++;                               /* Count cycles through address 0 */
	while (de4x5_reset_phy(dev)<0) udelay(100);  /* Wait for reset to complete */
	id = mii_get_oui(i, DE4X5_MII);
	if ((id == 0) || (id == 65535)) continue;    /* Valid ID? */
	for (j=0; j<limit; j++) {                    /* Search PHY table */
	    if (id != phy_info[j].id) continue;      /* ID match? */
	    for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
	    if (k < DE4X5_MAX_PHY) {                 /* Free slot in lp->phy[]? */
		memcpy((char *)&lp->phy[k],
		       (char *)&phy_info[j], sizeof(struct phy_table));
		lp->phy[k].addr = i;
		lp->mii_cnt++;
		lp->active++;
	    } else {
		goto purgatory;                      /* Stop the search */
	    }
	    break;
	}
	if ((j == limit) && (i < DE4X5_MAX_MII)) {   /* Unknown responding PHY */
	    for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
	    lp->phy[k].addr = i;
	    lp->phy[k].id = id;
	    lp->phy[k].spd.reg = GENERIC_REG;        /* Valid: PHY internals */
	    lp->phy[k].spd.mask = GENERIC_MASK;
	    lp->phy[k].spd.value = GENERIC_VALUE;
	    lp->mii_cnt++;
	    lp->active++;
	    printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
	    /* Temporarily enable MII debug to dump the PHY registers */
	    j = de4x5_debug;
	    de4x5_debug |= DEBUG_MII;
	    de4x5_dbg_mii(dev, k);
	    de4x5_debug = j;
	    printk("\n");
	}
    }
  purgatory:
    lp->active = 0;
    if (lp->phy[0].id) {                             /* Reset the PHY devices found */
	for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
	    mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
	    while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);

	    de4x5_dbg_mii(dev, k);
	}
    }
    if (!lp->mii_cnt) lp->useMII = false;

    return lp->mii_cnt;
}
5029
/*
** Build the chip's setup frame in lp->setup_frame.  With hash perfect
** filtering the station address goes at IMPERF_PA_OFFSET (two bytes per
** 32-bit slot) and the broadcast hash bit is set; with perfect
** filtering the station address and a broadcast entry are laid out two
** bytes per 64-bit slot.  Returns the working pointer into the frame
** (past the last entry written).
*/
static char *
build_setup_frame(struct net_device *dev, int mode)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;
    char *pa = lp->setup_frame;

    /* Initialise the setup frame */
    if (mode == ALL) {
	memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
    }

    if (lp->setup_f == HASH_PERF) {
	/* Hash filtering: physical address occupies two bytes of each
	** 32-bit setup-frame word. */
	for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
	    *(pa + i) = dev->dev_addr[i];            /* Host address */
	    if (i & 0x01) pa += 2;
	}
	*(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;  /* Broadcast hash bit */
    } else {
	/* Perfect filtering: two bytes per 64-bit slot; first the host
	** address, then a broadcast (all-ones) entry. */
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = dev->dev_addr[i];
	    if (i & 0x01) pa += 4;
	}
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = (char) 0xff;
	    if (i & 0x01) pa += 4;
	}
    }

    return pa;
}
5061
/*
** Disable the autosense timer: cancel it and wait for any running
** callback to finish.
*/
static void
disable_ast(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    del_timer_sync(&lp->timer);
}
5068
/*
** Switch the MAC to the port selected by the current info block: stop
** the chip, rewrite the operating mode register (clearing the port-
** related bits and inserting lp->infoblock_csr6), reset, then restore
** the GEP (DC21140) or SIA (DC2114x) state.  Returns the OMR value
** written.
*/
static long
de4x5_switch_mac_port(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;
    s32 omr;

    STOP_DE4X5;

    /* Assert the OMR_PS bit in CSR6 */
    omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
			      OMR_FDX));
    omr |= lp->infoblock_csr6;
    if (omr & OMR_PS) omr |= OMR_HBD;                /* Port select implies heartbeat disable */
    outl(omr, DE4X5_OMR);

    /* Soft reset so the registers are reloaded */
    RESET_DE4X5;

    /* Restore the GEP or SIA state for the new port */
    if (lp->chipset == DC21140) {
	gep_wr(lp->cache.gepc, dev);
	gep_wr(lp->cache.gep, dev);
    } else if ((lp->chipset & ~0x0ff) == DC2114x) {
	reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
    }

    /* Restore OMR - reset clobbered it */
    outl(omr, DE4X5_OMR);

    /* Reset the missed frame counter by reading it */
    inl(DE4X5_MFC);

    return omr;
}
5104
/*
** Write to the General Purpose port: the DC21140 has a dedicated GEP
** register, while the DC2114x family maps it into the top 16 bits of
** the SIA general register (merged with the cached CSR15 value).
*/
static void
gep_wr(s32 data, struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->chipset == DC21140) {
	outl(data, DE4X5_GEP);
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
	outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
    }
}
5117
/*
** Read the General Purpose port: dedicated GEP register on the DC21140,
** low 20 bits of the SIA general register on the DC2114x family.
** Returns 0 for any other chipset.
*/
static int
gep_rd(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (lp->chipset == DC21140) {
	return inl(DE4X5_GEP);
    } else if ((lp->chipset & ~0x00ff) == DC2114x) {
	return inl(DE4X5_SIGR) & 0x000fffff;
    }

    return 0;
}
5132
/*
** Put the chip into the requested power state (WAKEUP, SNOOZE or
** SLEEP).  DC21040/DC21140 have no power management, so this is a no-op
** for them.  EISA boards are driven through the CFPM I/O port; PCI
** boards through the CFDA config-space register.  Waking up needs a
** 10ms settle delay; sleeping also quiesces the SIA first.
*/
static void
yawn(struct net_device *dev, int state)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;

    if(lp->bus == EISA) {
	switch(state) {
	case WAKEUP:
	    outb(WAKEUP, PCI_CFPM);
	    mdelay(10);                              /* Let the chip power up */
	    break;

	case SNOOZE:
	    outb(SNOOZE, PCI_CFPM);
	    break;

	case SLEEP:
	    outl(0, DE4X5_SICR);                     /* Quiesce the SIA first */
	    outb(SLEEP, PCI_CFPM);
	    break;
	}
    } else {
	struct pci_dev *pdev = to_pci_dev (lp->gendev);
	switch(state) {
	case WAKEUP:
	    pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
	    mdelay(10);                              /* Let the chip power up */
	    break;

	case SNOOZE:
	    pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
	    break;

	case SLEEP:
	    outl(0, DE4X5_SICR);                     /* Quiesce the SIA first */
	    pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
	    break;
	}
    }
}
5176
/*
** Parse the module parameter string 'args' for options applying to this
** interface.  The substring between this device's name and the next
** "eth" token (temporarily NUL-terminated, then restored) is scanned
** for "fdx" and an "autosense" media keyword.  Defaults: no full
** duplex, autosense AUTO.  Note the keyword ordering: longer names
** ("TP_NW", "BNC_AUI") are matched before their prefixes.
*/
static void
de4x5_parse_params(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    char *p, *q, t;

    lp->params.fdx = false;
    lp->params.autosense = AUTO;

    if (args == NULL) return;

    if ((p = strstr(args, dev->name))) {
	if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
	t = *q;                                      /* Save, then terminate this device's span */
	*q = '\0';

	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;

	if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
	    if (strstr(p, "TP_NW")) {                /* Check before plain "TP" */
		lp->params.autosense = TP_NW;
	    } else if (strstr(p, "TP")) {
		lp->params.autosense = TP;
	    } else if (strstr(p, "BNC_AUI")) {       /* Check before plain "BNC" */
		lp->params.autosense = BNC;
	    } else if (strstr(p, "BNC")) {
		lp->params.autosense = BNC;
	    } else if (strstr(p, "AUI")) {
		lp->params.autosense = AUI;
	    } else if (strstr(p, "10Mb")) {
		lp->params.autosense = _10Mb;
	    } else if (strstr(p, "100Mb")) {
		lp->params.autosense = _100Mb;
	    } else if (strstr(p, "AUTO")) {
		lp->params.autosense = AUTO;
	    }
	}
	*q = t;                                      /* Restore the clobbered byte */
    }
}
5217
/*
** When DEBUG_OPEN is enabled, dump the device's open-time state: IRQ,
** MAC address, descriptor ring head addresses, the first three and last
** descriptor/buffer addresses of each ring, and the ring sizes.
*/
static void
de4x5_dbg_open(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);
    int i;

    if (de4x5_debug & DEBUG_OPEN) {
	printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
	printk("\tphysical address: %pM\n", dev->dev_addr);
	printk("Descriptor head addresses:\n");
	printk("\t0x%8.8lx  0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
	printk("Descriptor addresses:\nRX: ");
	for (i=0;i<lp->rxRingSize-1;i++){
	    if (i < 3) {                             /* Only the first three... */
		printk("0x%8.8lx  ",(u_long)&lp->rx_ring[i].status);
	    }
	}
	printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);  /* ...and the last */
	printk("TX: ");
	for (i=0;i<lp->txRingSize-1;i++){
	    if (i < 3) {
		printk("0x%8.8lx  ", (u_long)&lp->tx_ring[i].status);
	    }
	}
	printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
	printk("Descriptor buffers:\nRX: ");
	for (i=0;i<lp->rxRingSize-1;i++){
	    if (i < 3) {
		printk("0x%8.8x  ",le32_to_cpu(lp->rx_ring[i].buf));
	    }
	}
	printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
	printk("TX: ");
	for (i=0;i<lp->txRingSize-1;i++){
	    if (i < 3) {
		printk("0x%8.8x  ", le32_to_cpu(lp->tx_ring[i].buf));
	    }
	}
	printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
	printk("Ring size:\nRX: %d\nTX: %d\n",
	       (short)lp->rxRingSize,
	       (short)lp->txRingSize);
    }
}
5262
/*
** When DEBUG_MII is enabled, dump the registers of PHY slot 'k':
** control, status, ID words, and the autonegotiation / vendor registers
** (the set differs for the Broadcom T4 PHY).
*/
static void
de4x5_dbg_mii(struct net_device *dev, int k)
{
    struct de4x5_private *lp = netdev_priv(dev);
    u_long iobase = dev->base_addr;

    if (de4x5_debug & DEBUG_MII) {
	printk("\nMII device address: %d\n", lp->phy[k].addr);
	printk("MII CR:  %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
	printk("MII SR:  %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
	printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
	printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
	if (lp->phy[k].id != BROADCOM_T4) {
	    printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
	    printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
	}
	printk("MII 16:  %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
	if (lp->phy[k].id != BROADCOM_T4) {
	    printk("MII 17:  %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
	    printk("MII 18:  %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
	} else {
	    printk("MII 20:  %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
	}
    }
}
5288
/*
** When the media selection changes and DEBUG_MEDIA is enabled, print
** the new media type (and duplex); always record the new value in
** lp->c_media so the message is printed once per change.
*/
static void
de4x5_dbg_media(struct net_device *dev)
{
    struct de4x5_private *lp = netdev_priv(dev);

    if (lp->media != lp->c_media) {
	if (de4x5_debug & DEBUG_MEDIA) {
	    printk("%s: media is %s%s\n", dev->name,
		   (lp->media == NC  ? "unconnected, link down or incompatible connection" :
		    (lp->media == TP  ? "TP" :
		     (lp->media == ANS ? "TP/Nway" :
		      (lp->media == BNC ? "BNC" :
		       (lp->media == AUI ? "AUI" :
			(lp->media == BNC_AUI ? "BNC/AUI" :
			 (lp->media == EXT_SIA ? "EXT SIA" :
			  (lp->media == _100Mb  ? "100Mb/s" :
			   (lp->media == _10Mb   ? "10Mb/s" :
			    "???"
			    ))))))))), (lp->fdx?" full duplex.":"."));
	}
	lp->c_media = lp->media;
    }
}
5312
/*
** When DEBUG_SROM is enabled, dump the SROM header fields followed by
** the raw contents as 64 16-bit words.  NOTE(review): the u_short
** casts read the byte arrays in host endianness and assume suitable
** alignment of the srom struct -- debug output only.
*/
static void
de4x5_dbg_srom(struct de4x5_srom *p)
{
    int i;

    if (de4x5_debug & DEBUG_SROM) {
	printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
	printk("Sub-system ID:        %04x\n", *((u_short *)p->sub_system_id));
	printk("ID Block CRC:         %02x\n", (u_char)(p->id_block_crc));
	printk("SROM version:         %02x\n", (u_char)(p->version));
	printk("# controllers:        %02x\n", (u_char)(p->num_controllers));

	printk("Hardware Address:     %pM\n", p->ieee_addr);
	printk("CRC checksum:         %04x\n", (u_short)(p->chksum));
	for (i=0; i<64; i++) {
	    printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
	}
    }
}
5332
/*
** When DEBUG_RX is enabled, print a summary line for a received frame
** (destination MAC at skb->data, source MAC at offset 6, length/SAP
** bytes at offsets 12-13) followed by a 16-bytes-per-line hex dump of
** the first 'len' bytes.
*/
static void
de4x5_dbg_rx(struct sk_buff *skb, int len)
{
    int i, j;

    if (de4x5_debug & DEBUG_RX) {
	printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
	       skb->data, &skb->data[6],
	       (u_char)skb->data[12],
	       (u_char)skb->data[13],
	       len);
	for (j=0; len>0;j+=16, len-=16) {
	    printk("    %03x: ",j);
	    for (i=0; i<16 && i<len; i++) {
		printk("%02x ",(u_char)skb->data[i+j]);
	    }
	    printk("\n");
	}
    }
}
5353
5354
5355
5356
5357
5358
/*
** Private ioctl handler.  The real command and a user-space data
** pointer are carried in a struct de4x5_ioctl overlaid on ifr_ifru.
** Privileged commands (anything that writes state) require
** CAP_NET_ADMIN.  Returns 0 on success or a negative errno.
*/
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
	u8  addr[144];
	u16 sval[72];
	u32 lval[36];
    } tmp;                                           /* Scratch buffer for user copies */
    u_long flags = 0;

    switch(ioc->cmd) {
    case DE4X5_GET_HWADDR:                           /* Get the hardware address */
	ioc->len = ETH_ALEN;
	for (i=0; i<ETH_ALEN; i++) {
	    tmp.addr[i] = dev->dev_addr[i];
	}
	if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
	break;

    case DE4X5_SET_HWADDR:                           /* Set the hardware address */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
	if (netif_queue_stopped(dev))
	    return -EBUSY;
	netif_stop_queue(dev);                       /* Hold TX while rebuilding the filter */
	for (i=0; i<ETH_ALEN; i++) {
	    dev->dev_addr[i] = tmp.addr[i];
	}
	build_setup_frame(dev, PHYS_ADDR_ONLY);
	/* Set up the descriptor and give ownership to the card */
	load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
		                                       SETUP_FRAME_LEN, (struct sk_buff *)1);
	lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
	outl(POLL_DEMAND, DE4X5_TPD);                /* Start the TX */
	netif_wake_queue(dev);                       /* Restart TX */
	break;

    case DE4X5_SAY_BOO:                              /* Say "Boo!" to the kernel log file */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	printk("%s: Boo!\n", dev->name);
	break;

    case DE4X5_MCA_EN:                               /* Enable pass all multicast addressing */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	omr = inl(DE4X5_OMR);
	omr |= OMR_PM;
	outl(omr, DE4X5_OMR);
	break;

    case DE4X5_GET_STATS:                            /* Get the driver statistics */
    {
        struct pkt_stats statbuf;
	ioc->len = sizeof(statbuf);
	spin_lock_irqsave(&lp->lock, flags);         /* Snapshot under the lock */
	memcpy(&statbuf, &lp->pktStats, ioc->len);
	spin_unlock_irqrestore(&lp->lock, flags);
	if (copy_to_user(ioc->data, &statbuf, ioc->len))
	    return -EFAULT;
	break;
    }
    case DE4X5_CLR_STATS:                            /* Zero out the driver statistics */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	spin_lock_irqsave(&lp->lock, flags);
	memset(&lp->pktStats, 0, sizeof(lp->pktStats));
	spin_unlock_irqrestore(&lp->lock, flags);
	break;

    case DE4X5_GET_OMR:                              /* Get the OMR Register contents */
	/* NOTE(review): only the low 8 bits of OMR round-trip here,
	** matching the single byte written back by DE4X5_SET_OMR. */
	tmp.addr[0] = inl(DE4X5_OMR);
	if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
	break;

    case DE4X5_SET_OMR:                              /* Set the OMR Register contents */
	if (!capable(CAP_NET_ADMIN)) return -EPERM;
	if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
	outl(tmp.addr[0], DE4X5_OMR);
	break;

    case DE4X5_GET_REG:                              /* Get the DE4X5 Registers */
	j = 0;
	tmp.lval[0] = inl(DE4X5_STS); j+=4;
	tmp.lval[1] = inl(DE4X5_BMR); j+=4;
	tmp.lval[2] = inl(DE4X5_IMR); j+=4;
	tmp.lval[3] = inl(DE4X5_OMR); j+=4;
	tmp.lval[4] = inl(DE4X5_SISR); j+=4;
	tmp.lval[5] = inl(DE4X5_SICR); j+=4;
	tmp.lval[6] = inl(DE4X5_STRR); j+=4;
	tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
	ioc->len = j;
	if (copy_to_user(ioc->data, tmp.lval, ioc->len))
	    return -EFAULT;
	break;

#define DE4X5_DUMP              0x0f                 /* Dump the DE4X5 Status (unimplemented) */

    default:
	return -EOPNOTSUPP;
    }

    return status;
}
5553
5554static int __init de4x5_module_init (void)
5555{
5556 int err = 0;
5557
5558#ifdef CONFIG_PCI
5559 err = pci_register_driver(&de4x5_pci_driver);
5560#endif
5561#ifdef CONFIG_EISA
5562 err |= eisa_driver_register (&de4x5_eisa_driver);
5563#endif
5564
5565 return err;
5566}
5567
/*
** Module exit point: unregister whichever bus drivers were registered
** at init time.
*/
static void __exit de4x5_module_exit (void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver (&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
	eisa_driver_unregister (&de4x5_eisa_driver);
#endif
}
5577
/* Register the module entry/exit points with the kernel */
module_init (de4x5_module_init);
module_exit (de4x5_module_exit);