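/*
 * defxx - device driver for DEC FDDI controllers: DEFTA (TURBOchannel),
 * DEFEA (EISA) and DEFPA (PCI) boards, written by Lawrence V. Stefani
 * and others.
 */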
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>

#include <asm/byteorder.h>
#include <asm/io.h>

#include "defxx.h"

#define DRV_NAME "defxx"
#define DRV_VERSION "v1.10"
#define DRV_RELDATE "2006/12/14"

static char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	" Lawrence V. Stefani and others\n";

#define DYNAMIC_BUFFERS 1

#define SKBUFF_RX_COPYBREAK 200

#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

#ifdef CONFIG_PCI
#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
#else
#define DFX_BUS_PCI(dev) 0
#endif

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif

static void dfx_bus_init(struct net_device *dev);
static void dfx_bus_uninit(struct net_device *dev);
static void dfx_bus_config_check(DFX_board_t *bp);

static int dfx_driver_init(struct net_device *dev,
			   const char *print_name,
			   resource_size_t bar_start);
static int dfx_adap_init(DFX_board_t *bp, int get_buffers);

static int dfx_open(struct net_device *dev);
static int dfx_close(struct net_device *dev);

static void dfx_int_pr_halt_id(DFX_board_t *bp);
static void dfx_int_type_0_process(DFX_board_t *bp);
static void dfx_int_common(struct net_device *dev);
static irqreturn_t dfx_interrupt(int irq, void *dev_id);

static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void dfx_ctl_set_multicast_list(struct net_device *dev);
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int dfx_ctl_update_cam(DFX_board_t *bp);
static int dfx_ctl_update_filters(DFX_board_t *bp);

static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int dfx_hw_adap_state_rd(DFX_board_t *bp);
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);

static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
static void dfx_rcv_flush(DFX_board_t *bp);

static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev);
static int dfx_xmt_done(DFX_board_t *bp);
static void dfx_xmt_flush(DFX_board_t *bp);

static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;
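
/*
 * Low-level CSR accessors.  Depending on the bus and on CONFIG_DEFXX_MMIO
 * the board registers are reached either through memory-mapped I/O or
 * through port I/O; dfx_port_write_long()/dfx_port_read_long() pick the
 * appropriate access method at run time.
 */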
static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}

static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}

static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_writel(bp, offset, data);
	else
		dfx_outl(bp, offset, data);
}

static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}

static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}

static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_readl(bp, offset, data);
	else
		dfx_inl(bp, offset, data);
}
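
/*
 * dfx_get_bars - return the base address and length of the CSR region for
 * the given PCI, EISA or TURBOchannel device.  For EISA in MMIO mode the
 * window is read back from the ESIC address compare/mask registers;
 * otherwise the slot's port I/O base and fixed CSR length are used.
 */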
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;

		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar;

		if (dfx_use_mmio) {
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
			bar <<= 16;
			*bar_start = bar;
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
			bar <<= 16;
			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
		} else {
			*bar_start = base_addr;
			*bar_len = PI_ESIC_K_CSR_IO_LEN;
		}
	}
	if (dfx_bus_tc) {
		*bar_start = to_tc_dev(bdev)->resource.start +
			     PI_TC_K_CSR_OFFSET;
		*bar_len = PI_TC_K_CSR_LEN;
	}
}

static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};
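
/*
 * dfx_register - common probe code for all three bus types.  Allocates
 * the FDDI network device, reserves and (for MMIO) maps the CSR region,
 * runs dfx_driver_init() and finally registers the interface; the error
 * path unwinds each step in reverse order.
 */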
518static int dfx_register(struct device *bdev)
519{
520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
522 int dfx_bus_tc = DFX_BUS_TC(bdev);
523 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
524 const char *print_name = dev_name(bdev);
525 struct net_device *dev;
526 DFX_board_t *bp;
527 resource_size_t bar_start = 0;
528 resource_size_t bar_len = 0;
529 int alloc_size;
530 struct resource *region;
531 int err = 0;
532
533 if (!version_disp) {
534 version_disp = 1;
535 printk(version);
536 }
537
538 dev = alloc_fddidev(sizeof(*bp));
539 if (!dev) {
540 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
541 print_name);
542 return -ENOMEM;
543 }
544
545
546 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
547 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
548 print_name);
549 goto err_out;
550 }
551
552 SET_NETDEV_DEV(dev, bdev);
553
554 bp = netdev_priv(dev);
555 bp->bus_dev = bdev;
556 dev_set_drvdata(bdev, dev);
557
558 dfx_get_bars(bdev, &bar_start, &bar_len);
559
560 if (dfx_use_mmio)
561 region = request_mem_region(bar_start, bar_len, print_name);
562 else
563 region = request_region(bar_start, bar_len, print_name);
564 if (!region) {
565 printk(KERN_ERR "%s: Cannot reserve I/O resource "
566 "0x%lx @ 0x%lx, aborting\n",
567 print_name, (long)bar_len, (long)bar_start);
568 err = -EBUSY;
569 goto err_out_disable;
570 }
571
572
573 if (dfx_use_mmio) {
574 bp->base.mem = ioremap_nocache(bar_start, bar_len);
575 if (!bp->base.mem) {
576 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
577 err = -ENOMEM;
578 goto err_out_region;
579 }
580 } else {
581 bp->base.port = bar_start;
582 dev->base_addr = bar_start;
583 }
584
585
586 dev->netdev_ops = &dfx_netdev_ops;
587
588 if (dfx_bus_pci)
589 pci_set_master(to_pci_dev(bdev));
590
591 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
592 err = -ENODEV;
593 goto err_out_unmap;
594 }
595
596 err = register_netdev(dev);
597 if (err)
598 goto err_out_kfree;
599
600 printk("%s: registered as %s\n", print_name, dev->name);
601 return 0;
602
603err_out_kfree:
604 alloc_size = sizeof(PI_DESCR_BLOCK) +
605 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
606#ifndef DYNAMIC_BUFFERS
607 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
608#endif
609 sizeof(PI_CONSUMER_BLOCK) +
610 (PI_ALIGN_K_DESC_BLK - 1);
611 if (bp->kmalloced)
612 dma_free_coherent(bdev, alloc_size,
613 bp->kmalloced, bp->kmalloced_dma);
614
615err_out_unmap:
616 if (dfx_use_mmio)
617 iounmap(bp->base.mem);
618
619err_out_region:
620 if (dfx_use_mmio)
621 release_mem_region(bar_start, bar_len);
622 else
623 release_region(bar_start, bar_len);
624
625err_out_disable:
626 if (dfx_bus_pci)
627 pci_disable_device(to_pci_dev(bdev));
628
629err_out:
630 free_netdev(dev);
631 return err;
632}
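
/*
 * dfx_bus_init - bus-specific initialization: determine the IRQ, program
 * the DEFEA's ESIC I/O address decoding and enable its interrupts, or set
 * up the DEFPA's PCI latency timer and PFI mode control register.
 */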
666static void dfx_bus_init(struct net_device *dev)
667{
668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev;
670 int dfx_bus_pci = DFX_BUS_PCI(bdev);
671 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
672 int dfx_bus_tc = DFX_BUS_TC(bdev);
673 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
674 u8 val;
675
676 DBG_printk("In dfx_bus_init...\n");
677
678
679 bp->dev = dev;
680
681
682
683 if (dfx_bus_tc)
684 dev->irq = to_tc_dev(bdev)->interrupt;
685 if (dfx_bus_eisa) {
686 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
687
688
689 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
690 val &= PI_CONFIG_STAT_0_M_IRQ;
691 val >>= PI_CONFIG_STAT_0_V_IRQ;
692
693 switch (val) {
694 case PI_CONFIG_STAT_0_IRQ_K_9:
695 dev->irq = 9;
696 break;
697
698 case PI_CONFIG_STAT_0_IRQ_K_10:
699 dev->irq = 10;
700 break;
701
702 case PI_CONFIG_STAT_0_IRQ_K_11:
703 dev->irq = 11;
704 break;
705
706 case PI_CONFIG_STAT_0_IRQ_K_15:
707 dev->irq = 15;
708 break;
709 }
710
711
712
713
714
715
716
717
718
719
720
721
722
		/* Set the I/O decode range of the board.  */
		val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
		val = PI_ESIC_K_CSR_IO_LEN - 1;
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);

		/* Enable the decoders selected above.  */
		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
		if (dfx_use_mmio)
			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);

		/* Enable the board in the slot control register.  */
		val = PI_SLOT_CNTRL_M_ENB;
		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/* Select memory-mapped or port I/O mode in the burst
		 * holdoff register.
		 */
		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
		if (dfx_use_mmio)
			val |= PI_BURST_HOLDOFF_V_MEM_MAP;
		else
			val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);

		/* Enable interrupts at the ESIC.  */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
762 }
763 if (dfx_bus_pci) {
764 struct pci_dev *pdev = to_pci_dev(bdev);
765
766
767
768 dev->irq = pdev->irq;
769
770
771
772 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
773 if (val < PFI_K_LAT_TIMER_MIN) {
774 val = PFI_K_LAT_TIMER_DEF;
775 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
776 }
777
778
779 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
780 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
781 }
782}
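
/*
 * dfx_bus_uninit - undo dfx_bus_init(): disable interrupts at the ESIC
 * (EISA) or clear the PFI mode control register (PCI).
 */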
812static void dfx_bus_uninit(struct net_device *dev)
813{
814 DFX_board_t *bp = netdev_priv(dev);
815 struct device *bdev = bp->bus_dev;
816 int dfx_bus_pci = DFX_BUS_PCI(bdev);
817 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
818 u8 val;
819
820 DBG_printk("In dfx_bus_uninit...\n");
821
822
823
824 if (dfx_bus_eisa) {
825 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
826
827
		/* Disable interrupts at the ESIC.  */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
831 }
832 if (dfx_bus_pci) {
833
834 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
835 }
836}
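
/*
 * dfx_bus_config_check - work around board configuration restrictions.
 * On DEFEA (EISA) boards with an older PDQ revision the code falls back
 * to an 8-longword DMA burst size and disables full duplex, which those
 * boards apparently do not support.
 */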
869static void dfx_bus_config_check(DFX_board_t *bp)
870{
871 struct device __maybe_unused *bdev = bp->bus_dev;
872 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
873 int status;
874 u32 host_data;
875
876 DBG_printk("In dfx_bus_config_check...\n");
877
878
879
880 if (dfx_bus_eisa) {
881
882
883
884
885
886
887
888 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
889
890
891
892
893 status = dfx_hw_port_ctrl_req(bp,
894 PI_PCTRL_M_SUB_CMD,
895 PI_SUB_CMD_K_PDQ_REV_GET,
896 0,
897 &host_data);
898 if ((status != DFX_K_SUCCESS) || (host_data == 2))
899 {
900
901
902
903
904
905
906
907
908 switch (bp->burst_size)
909 {
910 case PI_PDATA_B_DMA_BURST_SIZE_32:
911 case PI_PDATA_B_DMA_BURST_SIZE_16:
912 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
913 break;
914
915 default:
916 break;
917 }
918
919
920
921 bp->full_duplex_enb = PI_SNMP_K_FALSE;
922 }
923 }
924 }
925 }
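
/*
 * dfx_driver_init - one-time driver initialization: set adapter defaults,
 * read the factory MAC address, and carve the descriptor block, command
 * request/response buffers, (optionally) the receive buffer block and the
 * consumer block out of a single coherent DMA allocation.
 */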
965static int dfx_driver_init(struct net_device *dev, const char *print_name,
966 resource_size_t bar_start)
967{
968 DFX_board_t *bp = netdev_priv(dev);
969 struct device *bdev = bp->bus_dev;
970 int dfx_bus_pci = DFX_BUS_PCI(bdev);
971 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
972 int dfx_bus_tc = DFX_BUS_TC(bdev);
973 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
974 int alloc_size;
975 char *top_v, *curr_v;
976 dma_addr_t top_p, curr_p;
977 u32 data;
978 __le32 le32;
979 char *board_name = NULL;
980
981 DBG_printk("In dfx_driver_init...\n");
982
983
984
985 dfx_bus_init(dev);
986
987
988
989
990
991
992
993
994
995
996 bp->full_duplex_enb = PI_SNMP_K_FALSE;
997 bp->req_ttrt = 8 * 12500;
998 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
999 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010 dfx_bus_config_check(bp);
1011
1012
1013
1014 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1015
1016
1017
1018 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1019
1020
1021
1022 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1023 &data) != DFX_K_SUCCESS) {
1024 printk("%s: Could not read adapter factory MAC address!\n",
1025 print_name);
1026 return DFX_K_FAILURE;
1027 }
1028 le32 = cpu_to_le32(data);
1029 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1030
1031 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1032 &data) != DFX_K_SUCCESS) {
1033 printk("%s: Could not read adapter factory MAC address!\n",
1034 print_name);
1035 return DFX_K_FAILURE;
1036 }
1037 le32 = cpu_to_le32(data);
1038 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1039
1040
1041
1042
1043
1044
1045
1046
1047 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1048 if (dfx_bus_tc)
1049 board_name = "DEFTA";
1050 if (dfx_bus_eisa)
1051 board_name = "DEFEA";
1052 if (dfx_bus_pci)
1053 board_name = "DEFPA";
1054 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1055 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1056 (long long)bar_start, dev->irq, dev->dev_addr);
1057
1058
1059
1060
1061
1062
1063 alloc_size = sizeof(PI_DESCR_BLOCK) +
1064 PI_CMD_REQ_K_SIZE_MAX +
1065 PI_CMD_RSP_K_SIZE_MAX +
1066#ifndef DYNAMIC_BUFFERS
1067 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1068#endif
1069 sizeof(PI_CONSUMER_BLOCK) +
1070 (PI_ALIGN_K_DESC_BLK - 1);
1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1072 &bp->kmalloced_dma,
1073 GFP_ATOMIC | __GFP_ZERO);
1074 if (top_v == NULL)
1075 return DFX_K_FAILURE;
1076
1077 top_p = bp->kmalloced_dma;
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1092 curr_v = top_v + (curr_p - top_p);
1093
1094
1095
1096 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1097 bp->descr_block_phys = curr_p;
1098 curr_v += sizeof(PI_DESCR_BLOCK);
1099 curr_p += sizeof(PI_DESCR_BLOCK);
1100
1101
1102
1103 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1104 bp->cmd_req_phys = curr_p;
1105 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1106 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1107
1108
1109
1110 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1111 bp->cmd_rsp_phys = curr_p;
1112 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1113 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1114
1115
1116
1117 bp->rcv_block_virt = curr_v;
1118 bp->rcv_block_phys = curr_p;
1119
1120#ifndef DYNAMIC_BUFFERS
1121 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1122 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1123#endif
1124
1125
1126
1127 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1128 bp->cons_block_phys = curr_p;
1129
1130
1131
1132 DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
1133 print_name,
1134 (long)bp->descr_block_virt, bp->descr_block_phys);
1135 DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
1136 print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
1137 DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
1138 print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
1139 DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
1140 print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
1141 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1142 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1143
1144 return DFX_K_SUCCESS;
1145}
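
/*
 * dfx_adap_init - bring the adapter to an operational state: reset the
 * PDQ, set the burst size, consumer block and descriptor block addresses,
 * configure characteristics and filters, post receive buffers and issue
 * the start command.  Called from dfx_open() and from error recovery.
 */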
1181static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1182 {
1183 DBG_printk("In dfx_adap_init...\n");
1184
1185
1186
1187 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1188
1189
1190
1191 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1192 {
1193 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1194 return DFX_K_FAILURE;
1195 }
1196
1197
1198
1199
1200
1201
1202 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1203
1204
1205
1206
1207
1208
1209
1210
1211 bp->cmd_req_reg.lword = 0;
1212 bp->cmd_rsp_reg.lword = 0;
1213 bp->rcv_xmt_reg.lword = 0;
1214
1215
1216
1217 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1218
1219
1220
1221 if (dfx_hw_port_ctrl_req(bp,
1222 PI_PCTRL_M_SUB_CMD,
1223 PI_SUB_CMD_K_BURST_SIZE_SET,
1224 bp->burst_size,
1225 NULL) != DFX_K_SUCCESS)
1226 {
1227 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1228 return DFX_K_FAILURE;
1229 }
1230
1231
1232
1233
1234
1235
1236
1237
1238 if (dfx_hw_port_ctrl_req(bp,
1239 PI_PCTRL_M_CONS_BLOCK,
1240 bp->cons_block_phys,
1241 0,
1242 NULL) != DFX_K_SUCCESS)
1243 {
1244 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1245 return DFX_K_FAILURE;
1246 }
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1259 (u32)(bp->descr_block_phys |
1260 PI_PDATA_A_INIT_M_BSWAP_INIT),
1261 0, NULL) != DFX_K_SUCCESS) {
1262 printk("%s: Could not set descriptor block address!\n",
1263 bp->dev->name);
1264 return DFX_K_FAILURE;
1265 }
1266
1267
1268
1269 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1270 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1271 bp->cmd_req_virt->char_set.item[0].value = 3;
1272 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1273 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1274 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1275 {
1276 printk("%s: DMA command request failed!\n", bp->dev->name);
1277 return DFX_K_FAILURE;
1278 }
1279
1280
1281
1282 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1283 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1284 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1285 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1286 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1287 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1288 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1289 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1290 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1291 {
1292 printk("%s: DMA command request failed!\n", bp->dev->name);
1293 return DFX_K_FAILURE;
1294 }
1295
1296
1297
1298 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1299 {
1300 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1301 return DFX_K_FAILURE;
1302 }
1303
1304
1305
1306 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1307 {
1308 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1309 return DFX_K_FAILURE;
1310 }
1311
1312
1313
1314
1315
1316
1317 if (get_buffers)
1318 dfx_rcv_flush(bp);
1319
1320
1321
1322 if (dfx_rcv_init(bp, get_buffers))
1323 {
1324 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1325 if (get_buffers)
1326 dfx_rcv_flush(bp);
1327 return DFX_K_FAILURE;
1328 }
1329
1330
1331
1332 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1333 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1334 {
1335 printk("%s: Start command failed\n", bp->dev->name);
1336 if (get_buffers)
1337 dfx_rcv_flush(bp);
1338 return DFX_K_FAILURE;
1339 }
1340
1341
1342
1343 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1344 return DFX_K_SUCCESS;
1345 }
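
/*
 * dfx_open - ifconfig-up entry point: register the (shared) interrupt
 * handler, reset the address and filter state and (re)initialize the
 * adapter.
 */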
1378static int dfx_open(struct net_device *dev)
1379{
1380 DFX_board_t *bp = netdev_priv(dev);
1381 int ret;
1382
1383 DBG_printk("In dfx_open...\n");
1384
1385
1386
1387 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1388 dev);
1389 if (ret) {
1390 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1391 return ret;
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1406
1407
1408
1409 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1410 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1411 bp->uc_count = 0;
1412 bp->mc_count = 0;
1413
1414
1415
1416 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1417 bp->group_prom = PI_FSTATE_K_BLOCK;
1418
1419 spin_lock_init(&bp->lock);
1420
1421
1422
1423 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;
1424 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1425 {
1426 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1427 free_irq(dev->irq, dev);
1428 return -EAGAIN;
1429 }
1430
1431
1432 netif_start_queue(dev);
1433 return 0;
1434}
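
/*
 * dfx_close - ifconfig-down entry point: disable interrupts, shut the
 * adapter down, flush pending transmit and receive buffers and release
 * the IRQ.
 */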
1469static int dfx_close(struct net_device *dev)
1470{
1471 DFX_board_t *bp = netdev_priv(dev);
1472
1473 DBG_printk("In dfx_close...\n");
1474
1475
1476
1477 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1478
1479
1480
1481 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492 dfx_xmt_flush(bp);
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505 bp->cmd_req_reg.lword = 0;
1506 bp->cmd_rsp_reg.lword = 0;
1507 bp->rcv_xmt_reg.lword = 0;
1508
1509
1510
1511 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1512
1513
1514
1515 dfx_rcv_flush(bp);
1516
1517
1518
1519 netif_stop_queue(dev);
1520
1521
1522
1523 free_irq(dev->irq, dev);
1524
1525 return 0;
1526}
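
/* dfx_int_pr_halt_id - print the reason the adapter halted, taken from
 * the halt id field of the port status register.
 */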
1556static void dfx_int_pr_halt_id(DFX_board_t *bp)
1557 {
1558 PI_UINT32 port_status;
1559 PI_UINT32 halt_id;
1560
1561
1562
1563 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1564
1565
1566
1567 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1568 switch (halt_id)
1569 {
1570 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1571 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1572 break;
1573
1574 case PI_HALT_ID_K_PARITY_ERROR:
1575 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1576 break;
1577
1578 case PI_HALT_ID_K_HOST_DIR_HALT:
1579 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1580 break;
1581
1582 case PI_HALT_ID_K_SW_FAULT:
1583 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1584 break;
1585
1586 case PI_HALT_ID_K_HW_FAULT:
1587 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1588 break;
1589
1590 case PI_HALT_ID_K_PC_TRACE:
1591 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1592 break;
1593
1594 case PI_HALT_ID_K_DMA_ERROR:
1595 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1596 break;
1597
1598 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1599 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1600 break;
1601
1602 case PI_HALT_ID_K_BUS_EXCEPTION:
1603 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1604 break;
1605
1606 default:
1607 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1608 break;
1609 }
1610 }
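
/*
 * dfx_int_type_0_process - handle type 0 (non-ring) interrupts: fatal
 * bus/memory errors trigger an adapter reset, transmit-flush requests are
 * honored, and adapter state changes update link availability (resetting
 * the adapter again if it has halted).
 */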
1660static void dfx_int_type_0_process(DFX_board_t *bp)
1661
1662 {
1663 PI_UINT32 type_0_status;
1664 PI_UINT32 state;
1665
1666
1667
1668
1669
1670
1671
1672 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1673 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1674
1675
1676
1677 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1678 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1679 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1680 {
1681
1682
1683 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1684 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1685
1686
1687
1688 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1689 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1690
1691
1692
1693 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1694 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1695
1696
1697
1698 bp->link_available = PI_K_FALSE;
1699 bp->reset_type = 0;
1700 printk("%s: Resetting adapter...\n", bp->dev->name);
1701 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1702 {
1703 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1704 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1705 return;
1706 }
1707 printk("%s: Adapter reset successful!\n", bp->dev->name);
1708 return;
1709 }
1710
1711
1712
1713 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1714 {
1715
1716
1717 bp->link_available = PI_K_FALSE;
1718 dfx_xmt_flush(bp);
1719 (void) dfx_hw_port_ctrl_req(bp,
1720 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1721 0,
1722 0,
1723 NULL);
1724 }
1725
1726
1727
1728 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1729 {
1730
1731
1732 state = dfx_hw_adap_state_rd(bp);
1733 if (state == PI_STATE_K_HALTED)
1734 {
1735
1736
1737
1738
1739
1740
1741 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1742 dfx_int_pr_halt_id(bp);
1743
1744
1745
1746 bp->link_available = PI_K_FALSE;
1747 bp->reset_type = 0;
1748 printk("%s: Resetting adapter...\n", bp->dev->name);
1749 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1750 {
1751 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1752 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1753 return;
1754 }
1755 printk("%s: Adapter reset successful!\n", bp->dev->name);
1756 }
1757 else if (state == PI_STATE_K_LINK_AVAIL)
1758 {
1759 bp->link_available = PI_K_TRUE;
1760 }
1761 }
1762 }
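
/*
 * dfx_int_common - interrupt service code common to all bus types: reap
 * completed transmits, process received frames, update the type 2
 * producer register and handle any pending type 0 interrupts.
 */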
1805static void dfx_int_common(struct net_device *dev)
1806{
1807 DFX_board_t *bp = netdev_priv(dev);
1808 PI_UINT32 port_status;
1809
1810
1811
1812 if(dfx_xmt_done(bp))
1813 netif_wake_queue(dev);
1814
1815
1816
1817 dfx_rcv_queue_process(bp);
1818
1819
1820
1821
1822
1823
1824
1825
1826 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1827
1828
1829
1830 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1831
1832
1833
1834 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1835 dfx_int_type_0_process(bp);
1836 }
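
/*
 * dfx_interrupt - top-level interrupt handler.  The IRQ may be shared, so
 * bus-specific status bits are checked first; interrupts are masked at
 * the PFI (PCI) or ESIC (EISA) around the call to dfx_int_common(), while
 * TURBOchannel boards are checked through the PDQ port status register.
 */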
1875static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1876{
1877 struct net_device *dev = dev_id;
1878 DFX_board_t *bp = netdev_priv(dev);
1879 struct device *bdev = bp->bus_dev;
1880 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1881 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1882 int dfx_bus_tc = DFX_BUS_TC(bdev);
1883
1884
1885
1886 if (dfx_bus_pci) {
1887 u32 status;
1888
1889 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1890 if (!(status & PFI_STATUS_M_PDQ_INT))
1891 return IRQ_NONE;
1892
1893 spin_lock(&bp->lock);
1894
1895
1896 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1897 PFI_MODE_M_DMA_ENB);
1898
1899
1900 dfx_int_common(dev);
1901
1902
1903 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1904 PFI_STATUS_M_PDQ_INT);
1905 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1906 (PFI_MODE_M_PDQ_INT_ENB |
1907 PFI_MODE_M_DMA_ENB));
1908
1909 spin_unlock(&bp->lock);
1910 }
1911 if (dfx_bus_eisa) {
1912 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1913 u8 status;
1914
1915 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1916 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1917 return IRQ_NONE;
1918
1919 spin_lock(&bp->lock);
1920
1921
1922 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1924
1925
1926 dfx_int_common(dev);
1927
1928
1929 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1930 status |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1932
1933 spin_unlock(&bp->lock);
1934 }
1935 if (dfx_bus_tc) {
1936 u32 status;
1937
1938 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1939 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1940 PI_PSTATUS_M_XMT_DATA_PENDING |
1941 PI_PSTATUS_M_SMT_HOST_PENDING |
1942 PI_PSTATUS_M_UNSOL_PENDING |
1943 PI_PSTATUS_M_CMD_RSP_PENDING |
1944 PI_PSTATUS_M_CMD_REQ_PENDING |
1945 PI_PSTATUS_M_TYPE_0_PENDING)))
1946 return IRQ_NONE;
1947
1948 spin_lock(&bp->lock);
1949
1950
1951 dfx_int_common(dev);
1952
1953 spin_unlock(&bp->lock);
1954 }
1955
1956 return IRQ_HANDLED;
1957}
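
/*
 * dfx_ctl_get_stats - fill in the generic network statistics from the
 * driver's own counters, then query the adapter for the SMT MIB objects
 * and MAC/port counters that make up the FDDI statistics block.
 */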
2003static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2004 {
2005 DFX_board_t *bp = netdev_priv(dev);
2006
2007
2008
2009 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2010 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2011 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2012 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2013 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2014 bp->rcv_frame_status_errors +
2015 bp->rcv_length_errors;
2016 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2017 bp->stats.gen.rx_dropped = bp->rcv_discards;
2018 bp->stats.gen.tx_dropped = bp->xmt_discards;
2019 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2020 bp->stats.gen.collisions = 0;
2021
2022
2023
2024 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2025 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2026 return (struct net_device_stats *)&bp->stats;
2027
2028
2029
2030 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2031 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2032 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2033 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2034 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2035 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2036 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2037 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2038 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2039 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2040 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2041 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2042 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2043 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2044 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2045 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2046 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2047 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2048 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2049 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2050 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2051 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2052 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2053 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2054 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2055 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2056 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2057 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2058 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2059 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2060 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2061 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2062 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2063 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2064 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2065 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2066 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2067 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2068 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2069 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2070 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2071 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2072 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2073 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2074 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2075 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2076 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2077 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2078 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2079 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2080 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2081 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2082 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2083 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2084 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2085 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2086 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2087 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2088 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2089 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2090 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2091 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2092 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2093 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2094 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2095 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2096 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2097 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2098 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2099 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2100 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2101 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2102 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2103 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2104 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2105 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2106 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2107 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2108 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2109 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2110 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2111 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2112 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2113 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2114 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2115 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2116 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2117 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2118 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2119 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2120 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2121 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2122
2123
2124
2125 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2126 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2127 return (struct net_device_stats *)&bp->stats;
2128
2129
2130
2131 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2132 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2133 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2134 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2135 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2136 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2137 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2138 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2139 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2140 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2141 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2142
2143 return (struct net_device_stats *)&bp->stats;
2144 }
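
/*
 * dfx_ctl_set_multicast_list - update the CAM and filters for the current
 * promiscuous and multicast configuration.  If there are more multicast
 * addresses than free CAM entries, group-promiscuous mode is enabled
 * instead of loading individual addresses.
 */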
2190static void dfx_ctl_set_multicast_list(struct net_device *dev)
2191{
2192 DFX_board_t *bp = netdev_priv(dev);
2193 int i;
2194 struct netdev_hw_addr *ha;
2195
2196
2197
2198 if (dev->flags & IFF_PROMISC)
2199 bp->ind_group_prom = PI_FSTATE_K_PASS;
2200
2201
2202
2203 else
2204 {
2205 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2227 {
2228 bp->group_prom = PI_FSTATE_K_PASS;
2229 bp->mc_count = 0;
2230 }
2231 else
2232 {
2233 bp->group_prom = PI_FSTATE_K_BLOCK;
2234 bp->mc_count = netdev_mc_count(dev);
2235 }
2236
2237
2238
2239 i = 0;
2240 netdev_for_each_mc_addr(ha, dev)
2241 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2242 ha->addr, FDDI_K_ALEN);
2243
2244 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2245 {
2246 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2247 }
2248 else
2249 {
2250 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2251 }
2252 }
2253
2254
2255
2256 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2257 {
2258 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2259 }
2260 else
2261 {
2262 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2263 }
2264 }
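
/*
 * dfx_ctl_set_mac_address - install a new unicast address in the CAM,
 * falling back to group-promiscuous mode if the CAM can no longer hold
 * all of the multicast addresses as well.
 */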
2303static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2304 {
2305 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2306 DFX_board_t *bp = netdev_priv(dev);
2307
2308
2309
2310 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
2311 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);
2312 bp->uc_count = 1;
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2327 {
2328 bp->group_prom = PI_FSTATE_K_PASS;
2329 bp->mc_count = 0;
2330
2331
2332
2333 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2334 {
2335 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2336 }
2337 else
2338 {
2339 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2340 }
2341 }
2342
2343
2344
2345 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2346 {
2347 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2348 }
2349 else
2350 {
2351 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2352 }
2353 return 0;
2354 }
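
/*
 * dfx_ctl_update_cam - rewrite the adapter CAM (Content Addressable
 * Memory) with the current unicast and multicast address tables.
 */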
2390static int dfx_ctl_update_cam(DFX_board_t *bp)
2391 {
2392 int i;
2393 PI_LAN_ADDR *p_addr;
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);
2409 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2410 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2411
2412
2413
2414 for (i=0; i < (int)bp->uc_count; i++)
2415 {
2416 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2417 {
2418 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2419 p_addr++;
2420 }
2421 }
2422
2423
2424
2425 for (i=0; i < (int)bp->mc_count; i++)
2426 {
2427 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2428 {
2429 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2430 p_addr++;
2431 }
2432 }
2433
2434
2435
2436 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2437 return DFX_K_FAILURE;
2438 return DFX_K_SUCCESS;
2439 }
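
/*
 * dfx_ctl_update_filters - send the broadcast, individual/group
 * promiscuous and group promiscuous filter states to the adapter.
 */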
2473static int dfx_ctl_update_filters(DFX_board_t *bp)
2474 {
2475 int i = 0;
2476
2477
2478
2479 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2480
2481
2482
2483 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2484 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2485
2486
2487
2488 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2489 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2490
2491
2492
2493 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2494 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2495
2496
2497
2498 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2499
2500
2501
2502 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2503 return DFX_K_FAILURE;
2504 return DFX_K_SUCCESS;
2505 }
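
/*
 * dfx_hw_dma_cmd_req - issue the command sitting in the command request
 * buffer to the adapter over the DMA command queues, polling the consumer
 * block for completion of both the request and the response.
 */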
2548static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2549 {
2550 int status;
2551 int timeout_cnt;
2552
2553
2554
2555 status = dfx_hw_adap_state_rd(bp);
2556 if ((status == PI_STATE_K_RESET) ||
2557 (status == PI_STATE_K_HALTED) ||
2558 (status == PI_STATE_K_DMA_UNAVAIL) ||
2559 (status == PI_STATE_K_UPGRADE))
2560 return DFX_K_OUTSTATE;
2561
2562
2563
2564 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2565 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2566 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2567
2568
2569
2570 bp->cmd_rsp_reg.index.prod += 1;
2571 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2572 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2573
2574
2575
2576 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2577 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2578 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2579
2580
2581
2582 bp->cmd_req_reg.index.prod += 1;
2583 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2584 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2585
2586
2587
2588
2589
2590
2591 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2592 {
2593 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2594 break;
2595 udelay(100);
2596 }
2597 if (timeout_cnt == 0)
2598 return DFX_K_HW_TIMEOUT;
2599
2600
2601
2602 bp->cmd_req_reg.index.comp += 1;
2603 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2604 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2605
2606
2607
2608
2609
2610
2611 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2612 {
2613 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2614 break;
2615 udelay(100);
2616 }
2617 if (timeout_cnt == 0)
2618 return DFX_K_HW_TIMEOUT;
2619
2620
2621
2622 bp->cmd_rsp_reg.index.comp += 1;
2623 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2624 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2625 return DFX_K_SUCCESS;
2626 }
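
/*
 * dfx_hw_port_ctrl_req - issue a port control command through the CSRs
 * and poll for completion; an optional result is returned through the
 * host data register.
 */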
2662static int dfx_hw_port_ctrl_req(
2663 DFX_board_t *bp,
2664 PI_UINT32 command,
2665 PI_UINT32 data_a,
2666 PI_UINT32 data_b,
2667 PI_UINT32 *host_data
2668 )
2669
2670 {
2671 PI_UINT32 port_cmd;
2672 int timeout_cnt;
2673
2674
2675
2676 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2677
2678
2679
2680 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2681 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2682 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2683
2684
2685
2686 if (command == PI_PCTRL_M_BLAST_FLASH)
2687 timeout_cnt = 600000;
2688 else
2689 timeout_cnt = 20000;
2690
2691 for (; timeout_cnt > 0; timeout_cnt--)
2692 {
2693 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2694 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2695 break;
2696 udelay(100);
2697 }
2698 if (timeout_cnt == 0)
2699 return DFX_K_HW_TIMEOUT;
2700
2701
2702
2703
2704
2705
2706
2707 if (host_data != NULL)
2708 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2709 return DFX_K_SUCCESS;
2710 }
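
/* dfx_hw_adap_reset - assert a reset of the given type through the port
 * reset register, hold it briefly, then deassert it.
 */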
2746static void dfx_hw_adap_reset(
2747 DFX_board_t *bp,
2748 PI_UINT32 type
2749 )
2750
2751 {
2752
2753
2754 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);
2755 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2756
2757
2758
2759 udelay(20);
2760
2761
2762
2763 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2764 }
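
/* dfx_hw_adap_state_rd - return the adapter state field of the port
 * status register.
 */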
2794static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2795 {
2796 PI_UINT32 port_status;
2797
2798 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2799 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2800 }
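
/* dfx_hw_dma_uninit - reset the adapter and wait for it to reach the
 * DMA-unavailable state.
 */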
2834static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2835 {
2836 int timeout_cnt;
2837
2838
2839
2840 dfx_hw_adap_reset(bp, type);
2841
2842
2843
2844 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2845 {
2846 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2847 break;
2848 udelay(100);
2849 }
2850 if (timeout_cnt == 0)
2851 return DFX_K_HW_TIMEOUT;
2852 return DFX_K_SUCCESS;
2853 }
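
/* my_skb_align - align an skb's data area to an n-byte boundary by
 * reserving the necessary headroom (n is expected to be a power of two,
 * as required by ALIGN()).
 */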
2860static void my_skb_align(struct sk_buff *skb, int n)
2861{
2862 unsigned long x = (unsigned long)skb->data;
2863 unsigned long v;
2864
2865 v = ALIGN(x, n);
2866
2867 skb_reserve(skb, v - x);
2868}
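
/*
 * dfx_rcv_init - populate the receive descriptor ring, either with
 * dynamically allocated socket buffers (DYNAMIC_BUFFERS) or with slices
 * of the statically allocated receive buffer block, then advance the
 * receive producer index.
 */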
2906static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2907 {
2908 int i, j;
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928 if (get_buffers) {
2929#ifdef DYNAMIC_BUFFERS
2930 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2931 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2932 {
2933 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2934 if (!newskb)
2935 return -ENOMEM;
2936 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2937 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2938
2939
2940
2941
2942
2943 my_skb_align(newskb, 128);
2944 bp->descr_block_virt->rcv_data[i + j].long_1 =
2945 (u32)dma_map_single(bp->bus_dev, newskb->data,
2946 NEW_SKB_SIZE,
2947 DMA_FROM_DEVICE);
2948
2949
2950
2951
2952 bp->p_rcv_buff_va[i+j] = (char *) newskb;
2953 }
2954#else
2955 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2956 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2957 {
2958 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2959 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2960 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2961 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2962 }
2963#endif
2964 }
2965
2966
2967
2968 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2969 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2970 return 0;
2971 }
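
/*
 * dfx_rcv_queue_process - walk the receive ring, validate each frame and
 * pass good frames to the network stack, copying small frames and
 * swapping in a fresh buffer for large ones when DYNAMIC_BUFFERS is in
 * use, then recycle the descriptors.
 */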
3006static void dfx_rcv_queue_process(
3007 DFX_board_t *bp
3008 )
3009
3010 {
3011 PI_TYPE_2_CONSUMER *p_type_2_cons;
3012 char *p_buff;
3013 u32 descr, pkt_len;
3014 struct sk_buff *skb;
3015
3016
3017
3018 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3019 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3020 {
3021
3022
3023 int entry;
3024
3025 entry = bp->rcv_xmt_reg.index.rcv_comp;
3026#ifdef DYNAMIC_BUFFERS
3027 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3028#else
3029 p_buff = bp->p_rcv_buff_va[entry];
3030#endif
3031 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3032
3033 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3034 {
3035 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3036 bp->rcv_crc_errors++;
3037 else
3038 bp->rcv_frame_status_errors++;
3039 }
3040 else
3041 {
3042 int rx_in_place = 0;
3043
3044
3045
3046 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3047 pkt_len -= 4;
3048 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3049 bp->rcv_length_errors++;
3050 else{
3051#ifdef DYNAMIC_BUFFERS
3052 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3053 struct sk_buff *newskb;
3054
3055 newskb = dev_alloc_skb(NEW_SKB_SIZE);
3056 if (newskb){
3057 rx_in_place = 1;
3058
3059 my_skb_align(newskb, 128);
3060 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3061 dma_unmap_single(bp->bus_dev,
3062 bp->descr_block_virt->rcv_data[entry].long_1,
3063 NEW_SKB_SIZE,
3064 DMA_FROM_DEVICE);
3065 skb_reserve(skb, RCV_BUFF_K_PADDING);
3066 bp->p_rcv_buff_va[entry] = (char *)newskb;
3067 bp->descr_block_virt->rcv_data[entry].long_1 =
3068 (u32)dma_map_single(bp->bus_dev,
3069 newskb->data,
3070 NEW_SKB_SIZE,
3071 DMA_FROM_DEVICE);
3072 } else
3073 skb = NULL;
3074 } else
3075#endif
3076 skb = dev_alloc_skb(pkt_len+3);
3077 if (skb == NULL)
3078 {
3079 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3080 bp->rcv_discards++;
3081 break;
3082 }
3083 else {
                /* Copy only when the frame was not handed up in place;
                 * without DYNAMIC_BUFFERS rx_in_place is always zero
                 * and every frame is copied. */
                if (!rx_in_place)
3087 {
3088
3089
3090 skb_copy_to_linear_data(skb,
3091 p_buff + RCV_BUFF_K_PADDING,
3092 pkt_len + 3);
3093 }
3094
3095 skb_reserve(skb,3);
3096 skb_put(skb, pkt_len);
3097 skb->protocol = fddi_type_trans(skb, bp->dev);
3098 bp->rcv_total_bytes += skb->len;
3099 netif_rx(skb);
3100
3101
3102 bp->rcv_total_frames++;
3103 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3104 bp->rcv_multicast_frames++;
3105 }
3106 }
3107 }
3108
3109
3110
3111
3112
3113
3114
3115
3116
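            /*
             * Good frame or not, return the buffer to the adapter
             * and move on to the next completed entry.
             */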
3117 bp->rcv_xmt_reg.index.rcv_prod += 1;
3118 bp->rcv_xmt_reg.index.rcv_comp += 1;
3119 }
3120 }
3121
3183
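/*
 * dfx_xmt_queue_pkt - queue an LLC frame for transmission.
 *
 * Validates the frame length and the link state, prepends the 3-byte
 * packet request header, fills in a single transmit descriptor with
 * the DMA-mapped frame and hands it to the adapter by advancing the
 * transmit producer index.  Returns NETDEV_TX_BUSY when the transmit
 * ring is full so that the stack requeues the skb.
 */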
3184static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3185 struct net_device *dev)
3186 {
3187 DFX_board_t *bp = netdev_priv(dev);
3188 u8 prod;
3189 PI_XMT_DESCR *p_xmt_descr;
3190 XMT_DRIVER_DESCR *p_xmt_drv_descr;
3191 unsigned long flags;
3192
3193 netif_stop_queue(dev);
3194
3203
3204 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3205 {
3206 printk("%s: Invalid packet length - %u bytes\n",
3207 dev->name, skb->len);
3208 bp->xmt_length_errors++;
3209 netif_wake_queue(dev);
3210 dev_kfree_skb(skb);
3211 return NETDEV_TX_OK;
3212 }
3213
3224
3225 if (bp->link_available == PI_K_FALSE)
3226 {
3227 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)
3228 bp->link_available = PI_K_TRUE;
3229 else
3230 {
3231 bp->xmt_discards++;
3232 dev_kfree_skb(skb);
3233 netif_wake_queue(dev);
3234 return NETDEV_TX_OK;
3235 }
3236 }
3237
3238 spin_lock_irqsave(&bp->lock, flags);
3239
3240
3241
3242 prod = bp->rcv_xmt_reg.index.xmt_prod;
3243 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3244
3255
3256 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);
3257
3258
3259
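    /*
     * Prepend the three packet request header (PRH) bytes that the
     * adapter expects in front of the FC byte; the DFX_PRH*_BYTE
     * values are fixed and defined in defxx.h.
     */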
3260 skb_push(skb,3);
3261 skb->data[0] = DFX_PRH0_BYTE;
3262 skb->data[1] = DFX_PRH1_BYTE;
3263 skb->data[2] = DFX_PRH2_BYTE;
3264
3291
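    /*
     * A single descriptor carries the whole frame: mark it as both
     * start and end of packet, record the segment length and point
     * it at the DMA mapping of the skb data.
     */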
3292 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3293 p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
3294 skb->len, DMA_TO_DEVICE);
3295
3306
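    /*
     * With the producer already bumped, catching up to the completion
     * index means the transmit ring is full: back this frame out and
     * let the stack requeue it.
     */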
    if (prod == bp->rcv_xmt_reg.index.xmt_comp)
    {
        dma_unmap_single(bp->bus_dev, p_xmt_descr->long_1,
                 skb->len, DMA_TO_DEVICE);  /* don't leak the mapping */
        skb_pull(skb, 3);
        spin_unlock_irqrestore(&bp->lock, flags);
        return NETDEV_TX_BUSY;
    }
3313
3329
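    /*
     * Remember the skb so that dfx_xmt_done() / dfx_xmt_flush() can
     * unmap and free it once the adapter has consumed the descriptor.
     */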
3330 p_xmt_drv_descr->p_skb = skb;
3331
3332
3333
3334 bp->rcv_xmt_reg.index.xmt_prod = prod;
3335 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3336 spin_unlock_irqrestore(&bp->lock, flags);
3337 netif_wake_queue(dev);
3338 return NETDEV_TX_OK;
3339 }
3340
3373
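/*
 * dfx_xmt_done - reclaim transmit descriptors the adapter has consumed.
 *
 * For every completed entry, update the transmit counters, unmap the
 * frame's DMA buffer, free the skb (in IRQ-safe fashion) and advance
 * the completion index.  Returns the number of descriptors freed.
 */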
3374static int dfx_xmt_done(DFX_board_t *bp)
3375 {
3376 XMT_DRIVER_DESCR *p_xmt_drv_descr;
3377 PI_TYPE_2_CONSUMER *p_type_2_cons;
3378 u8 comp;
3379 int freed = 0;
3380
3381
3382
3383 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3384 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3385 {
3386
3387
3388 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3389
3390
3391
3392 bp->xmt_total_frames++;
3393 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3394
3395
3396 comp = bp->rcv_xmt_reg.index.xmt_comp;
3397 dma_unmap_single(bp->bus_dev,
3398 bp->descr_block_virt->xmt_data[comp].long_1,
3399 p_xmt_drv_descr->p_skb->len,
3400 DMA_TO_DEVICE);
3401 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3402
3413
3414 bp->rcv_xmt_reg.index.xmt_comp += 1;
3415 freed++;
3416 }
3417 return freed;
3418 }
3419
3446
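/*
 * dfx_rcv_flush - release the posted receive buffers.
 *
 * Only meaningful with DYNAMIC_BUFFERS, where every ring entry owns an
 * skb that must be freed when the adapter is shut down; without it the
 * function is an empty inline stub.
 */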
3447#ifdef DYNAMIC_BUFFERS
3448static void dfx_rcv_flush( DFX_board_t *bp )
3449 {
3450 int i, j;
3451
3452 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3453 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3454 {
            struct sk_buff *skb;
            skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
            if (skb) {
                /* unmap before freeing to avoid leaking the rx DMA mapping */
                dma_unmap_single(bp->bus_dev,
                         bp->descr_block_virt->rcv_data[i+j].long_1,
                         NEW_SKB_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
            }
            bp->p_rcv_buff_va[i+j] = NULL;
3460 }
3461
3462 }
3463#else
3464static inline void dfx_rcv_flush( DFX_board_t *bp )
3465{
3466}
3467#endif
3468
3504
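/*
 * dfx_xmt_flush - discard transmits still outstanding at the adapter.
 *
 * Used when the adapter is reset or closed: each descriptor between
 * the completion and producer indices is unmapped, its skb freed and
 * counted as a discard, and the transmit field of the consumer block
 * is rewritten so the driver and adapter indices agree again.
 */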
3505static void dfx_xmt_flush( DFX_board_t *bp )
3506 {
3507 u32 prod_cons;
3508 XMT_DRIVER_DESCR *p_xmt_drv_descr;
3509 u8 comp;
3510
3511
3512
3513 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3514 {
3515
3516
3517 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3518
3519
3520 comp = bp->rcv_xmt_reg.index.xmt_comp;
3521 dma_unmap_single(bp->bus_dev,
3522 bp->descr_block_virt->xmt_data[comp].long_1,
3523 p_xmt_drv_descr->p_skb->len,
3524 DMA_TO_DEVICE);
3525 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3526
3527
3528
3529 bp->xmt_discards++;
3530
3541
3542 bp->rcv_xmt_reg.index.xmt_comp += 1;
3543 }
3544
3545
3546
3547 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3548 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3549 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3550 }
3551
3577
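/*
 * dfx_unregister - detach one adapter instance.
 *
 * Reverses dfx_register(): unregisters the net device, frees the
 * coherent descriptor/consumer block allocation, uninitializes the
 * bus-specific state, releases the MMIO or I/O port region, disables
 * a PCI device if that is the underlying bus, and frees the
 * net_device.
 */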
3578static void dfx_unregister(struct device *bdev)
3579{
3580 struct net_device *dev = dev_get_drvdata(bdev);
3581 DFX_board_t *bp = netdev_priv(dev);
3582 int dfx_bus_pci = DFX_BUS_PCI(bdev);
3583 int dfx_bus_tc = DFX_BUS_TC(bdev);
3584 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3585 resource_size_t bar_start = 0;
3586 resource_size_t bar_len = 0;
3587 int alloc_size;
3588
3589 unregister_netdev(dev);
3590
3591 alloc_size = sizeof(PI_DESCR_BLOCK) +
3592 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3593#ifndef DYNAMIC_BUFFERS
3594 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3595#endif
3596 sizeof(PI_CONSUMER_BLOCK) +
3597 (PI_ALIGN_K_DESC_BLK - 1);
3598 if (bp->kmalloced)
3599 dma_free_coherent(bdev, alloc_size,
3600 bp->kmalloced, bp->kmalloced_dma);
3601
3602 dfx_bus_uninit(dev);
3603
3604 dfx_get_bars(bdev, &bar_start, &bar_len);
3605 if (dfx_use_mmio) {
3606 iounmap(bp->base.mem);
3607 release_mem_region(bar_start, bar_len);
3608 } else
3609 release_region(bar_start, bar_len);
3610
3611 if (dfx_bus_pci)
3612 pci_disable_device(to_pci_dev(bdev));
3613
3614 free_netdev(dev);
3615}
3616
3617
3618static int __maybe_unused dfx_dev_register(struct device *);
3619static int __maybe_unused dfx_dev_unregister(struct device *);
3620
3621#ifdef CONFIG_PCI
3622static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3623static void dfx_pci_unregister(struct pci_dev *);
3624
3625static const struct pci_device_id dfx_pci_table[] = {
3626 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3627 { }
3628};
3629MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3630
3631static struct pci_driver dfx_pci_driver = {
3632 .name = "defxx",
3633 .id_table = dfx_pci_table,
3634 .probe = dfx_pci_register,
3635 .remove = dfx_pci_unregister,
3636};
3637
3638static int dfx_pci_register(struct pci_dev *pdev,
3639 const struct pci_device_id *ent)
3640{
3641 return dfx_register(&pdev->dev);
3642}
3643
3644static void dfx_pci_unregister(struct pci_dev *pdev)
3645{
3646 dfx_unregister(&pdev->dev);
3647}
3648#endif
3649
3650#ifdef CONFIG_EISA
3651static struct eisa_device_id dfx_eisa_table[] = {
3652 { "DEC3001", DEFEA_PROD_ID_1 },
3653 { "DEC3002", DEFEA_PROD_ID_2 },
3654 { "DEC3003", DEFEA_PROD_ID_3 },
3655 { "DEC3004", DEFEA_PROD_ID_4 },
3656 { }
3657};
3658MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3659
3660static struct eisa_driver dfx_eisa_driver = {
3661 .id_table = dfx_eisa_table,
3662 .driver = {
3663 .name = "defxx",
3664 .bus = &eisa_bus_type,
3665 .probe = dfx_dev_register,
3666 .remove = dfx_dev_unregister,
3667 },
3668};
3669#endif
3670
3671#ifdef CONFIG_TC
3672static struct tc_device_id const dfx_tc_table[] = {
3673 { "DEC ", "PMAF-FA " },
3674 { "DEC ", "PMAF-FD " },
3675 { "DEC ", "PMAF-FS " },
3676 { "DEC ", "PMAF-FU " },
3677 { }
3678};
3679MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3680
3681static struct tc_driver dfx_tc_driver = {
3682 .id_table = dfx_tc_table,
3683 .driver = {
3684 .name = "defxx",
3685 .bus = &tc_bus_type,
3686 .probe = dfx_dev_register,
3687 .remove = dfx_dev_unregister,
3688 },
3689};
3690#endif
3691
3692static int __maybe_unused dfx_dev_register(struct device *dev)
3693{
3694 int status;
3695
3696 status = dfx_register(dev);
3697 if (!status)
3698 get_device(dev);
3699 return status;
3700}
3701
3702static int __maybe_unused dfx_dev_unregister(struct device *dev)
3703{
3704 put_device(dev);
3705 dfx_unregister(dev);
3706 return 0;
3707}
3708
3709
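/*
 * Module entry points: register the driver with every bus type it can
 * serve (PCI, EISA, TurboChannel) and unregister in the reverse order
 * on unload.
 */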
static int __init dfx_init(void)
3711{
3712 int status;
3713
3714 status = pci_register_driver(&dfx_pci_driver);
3715 if (!status)
3716 status = eisa_driver_register(&dfx_eisa_driver);
3717 if (!status)
3718 status = tc_register_driver(&dfx_tc_driver);
3719 return status;
3720}
3721
static void __exit dfx_cleanup(void)
3723{
3724 tc_unregister_driver(&dfx_tc_driver);
3725 eisa_driver_unregister(&dfx_eisa_driver);
3726 pci_unregister_driver(&dfx_pci_driver);
3727}
3728
3729module_init(dfx_init);
3730module_exit(dfx_cleanup);
3731MODULE_AUTHOR("Lawrence V. Stefani");
3732MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3733 DRV_VERSION " " DRV_RELDATE);
3734MODULE_LICENSE("GPL");
3735