1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202#include <linux/bitops.h>
203#include <linux/compiler.h>
204#include <linux/delay.h>
205#include <linux/dma-mapping.h>
206#include <linux/eisa.h>
207#include <linux/errno.h>
208#include <linux/fddidevice.h>
209#include <linux/init.h>
210#include <linux/interrupt.h>
211#include <linux/ioport.h>
212#include <linux/kernel.h>
213#include <linux/module.h>
214#include <linux/netdevice.h>
215#include <linux/pci.h>
216#include <linux/skbuff.h>
217#include <linux/slab.h>
218#include <linux/string.h>
219#include <linux/tc.h>
220
221#include <asm/byteorder.h>
222#include <asm/io.h>
223
224#include "defxx.h"
225
226
/* Driver identification strings. */
#define DRV_NAME "defxx"
#define DRV_VERSION "v1.10"
#define DRV_RELDATE "2006/12/14"

/* Banner printed once, when the first adapter is registered. */
static char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	" Lawrence V. Stefani and others\n";

/* When defined, receive buffers are skbs allocated dynamically instead of
 * being carved out of the single coherent DMA block at driver init
 * (see the #ifndef DYNAMIC_BUFFERS sections in dfx_driver_init). */
#define DYNAMIC_BUFFERS 1

/* Received frames no longer than this threshold are copied into a small
 * freshly allocated skb so the large receive buffer can be reposted. */
#define SKBUFF_RX_COPYBREAK 200

/* Size of a dynamically allocated receive skb: maximum receive data size
 * plus 128 bytes of slack (presumably for alignment — TODO confirm
 * against dfx_rcv_init's usage). */
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

/* Bus-type predicates.  Each collapses to constant 0 when the matching
 * bus support is not configured, letting the compiler drop the
 * bus-specific code paths entirely. */
#ifdef CONFIG_PCI
#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
#else
#define DFX_BUS_PCI(dev) 0
#endif

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

/* Compile-time selection of MMIO vs. port I/O register access.
 * TURBOchannel boards always use MMIO regardless of this setting
 * (see the "DFX_MMIO || dfx_bus_tc" tests throughout). */
#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif
267
268
269
/* Bus-specific setup/teardown and configuration sanity checking. */
static void dfx_bus_init(struct net_device *dev);
static void dfx_bus_uninit(struct net_device *dev);
static void dfx_bus_config_check(DFX_board_t *bp);

/* One-time driver/adapter initialization at probe time. */
static int dfx_driver_init(struct net_device *dev,
			   const char *print_name,
			   resource_size_t bar_start);
static int dfx_adap_init(DFX_board_t *bp, int get_buffers);

/* net_device open/stop entry points. */
static int dfx_open(struct net_device *dev);
static int dfx_close(struct net_device *dev);

/* Interrupt service paths. */
static void dfx_int_pr_halt_id(DFX_board_t *bp);
static void dfx_int_type_0_process(DFX_board_t *bp);
static void dfx_int_common(struct net_device *dev);
static irqreturn_t dfx_interrupt(int irq, void *dev_id);

/* Control/statistics entry points and CAM/filter maintenance. */
static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void dfx_ctl_set_multicast_list(struct net_device *dev);
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int dfx_ctl_update_cam(DFX_board_t *bp);
static int dfx_ctl_update_filters(DFX_board_t *bp);

/* Low-level PDQ hardware command/reset/state primitives. */
static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int dfx_hw_adap_state_rd(DFX_board_t *bp);
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);

/* Receive path. */
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
static void dfx_rcv_flush(DFX_board_t *bp);

/* Transmit path. */
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev);
static int dfx_xmt_done(DFX_board_t *bp);
static void dfx_xmt_flush(DFX_board_t *bp);

/* Bus driver structures, defined later in this file. */
static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
/* Write a 32-bit value to a memory-mapped adapter register.  The mb()
 * orders this store against subsequent register accesses. */
static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}
370
/* Write a 32-bit value to an adapter register via port I/O. */
static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}
375
376static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
377{
378 struct device __maybe_unused *bdev = bp->bus_dev;
379 int dfx_bus_tc = DFX_BUS_TC(bdev);
380 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
381
382 if (dfx_use_mmio)
383 dfx_writel(bp, offset, data);
384 else
385 dfx_outl(bp, offset, data);
386}
387
388
/* Read a 32-bit value from a memory-mapped adapter register.  The mb()
 * orders this load against preceding register accesses. */
static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}
394
/* Read a 32-bit value from an adapter register via port I/O. */
static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}
399
400static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
401{
402 struct device __maybe_unused *bdev = bp->bus_dev;
403 int dfx_bus_tc = DFX_BUS_TC(bdev);
404 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
405
406 if (dfx_use_mmio)
407 dfx_readl(bp, offset, data);
408 else
409 dfx_inl(bp, offset, data);
410}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
/*
 * dfx_get_bars - locate the CSR register window for an adapter
 * @bdev: generic device of the board
 * @bar_start: output, bus/physical start of the register window
 * @bar_len: output, length of the register window
 *
 * PCI: BAR 0 in MMIO mode, BAR 1 in port I/O mode.
 * EISA: in MMIO mode the window is reconstructed from the ESIC memory
 * address compare/mask registers (read byte-wise, high byte first); in
 * port I/O mode it is the slot's I/O base with a fixed length.
 * TURBOchannel: a fixed offset and length within the slot resource.
 */
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;

		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar;

		if (dfx_use_mmio) {
			/* Assemble the 24-bit memory compare address,
			 * shifted left 16 to form the full bus address. */
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
			bar <<= 16;
			*bar_start = bar;
			/* Same layout for the mask; the window length is
			 * mask + 1 after forcing the minimum-size bits. */
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
			bar <<= 16;
			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
		} else {
			*bar_start = base_addr;
			*bar_len = PI_ESIC_K_CSR_IO_LEN;
		}
	}
	if (dfx_bus_tc) {
		*bar_start = to_tc_dev(bdev)->resource.start +
			     PI_TC_K_CSR_OFFSET;
		*bar_len = PI_TC_K_CSR_LEN;
	}
}
480
/* net_device entry points for this driver. */
static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518static int dfx_register(struct device *bdev)
519{
520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
522 int dfx_bus_tc = DFX_BUS_TC(bdev);
523 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
524 const char *print_name = dev_name(bdev);
525 struct net_device *dev;
526 DFX_board_t *bp;
527 resource_size_t bar_start = 0;
528 resource_size_t bar_len = 0;
529 int alloc_size;
530 struct resource *region;
531 int err = 0;
532
533 if (!version_disp) {
534 version_disp = 1;
535 printk(version);
536 }
537
538 dev = alloc_fddidev(sizeof(*bp));
539 if (!dev) {
540 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
541 print_name);
542 return -ENOMEM;
543 }
544
545
546 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
547 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
548 print_name);
549 goto err_out;
550 }
551
552 SET_NETDEV_DEV(dev, bdev);
553
554 bp = netdev_priv(dev);
555 bp->bus_dev = bdev;
556 dev_set_drvdata(bdev, dev);
557
558 dfx_get_bars(bdev, &bar_start, &bar_len);
559
560 if (dfx_use_mmio)
561 region = request_mem_region(bar_start, bar_len, print_name);
562 else
563 region = request_region(bar_start, bar_len, print_name);
564 if (!region) {
565 printk(KERN_ERR "%s: Cannot reserve I/O resource "
566 "0x%lx @ 0x%lx, aborting\n",
567 print_name, (long)bar_len, (long)bar_start);
568 err = -EBUSY;
569 goto err_out_disable;
570 }
571
572
573 if (dfx_use_mmio) {
574 bp->base.mem = ioremap_nocache(bar_start, bar_len);
575 if (!bp->base.mem) {
576 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
577 err = -ENOMEM;
578 goto err_out_region;
579 }
580 } else {
581 bp->base.port = bar_start;
582 dev->base_addr = bar_start;
583 }
584
585
586 dev->netdev_ops = &dfx_netdev_ops;
587
588 if (dfx_bus_pci)
589 pci_set_master(to_pci_dev(bdev));
590
591 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
592 err = -ENODEV;
593 goto err_out_unmap;
594 }
595
596 err = register_netdev(dev);
597 if (err)
598 goto err_out_kfree;
599
600 printk("%s: registered as %s\n", print_name, dev->name);
601 return 0;
602
603err_out_kfree:
604 alloc_size = sizeof(PI_DESCR_BLOCK) +
605 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
606#ifndef DYNAMIC_BUFFERS
607 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
608#endif
609 sizeof(PI_CONSUMER_BLOCK) +
610 (PI_ALIGN_K_DESC_BLK - 1);
611 if (bp->kmalloced)
612 dma_free_coherent(bdev, alloc_size,
613 bp->kmalloced, bp->kmalloced_dma);
614
615err_out_unmap:
616 if (dfx_use_mmio)
617 iounmap(bp->base.mem);
618
619err_out_region:
620 if (dfx_use_mmio)
621 release_mem_region(bar_start, bar_len);
622 else
623 release_region(bar_start, bar_len);
624
625err_out_disable:
626 if (dfx_bus_pci)
627 pci_disable_device(to_pci_dev(bdev));
628
629err_out:
630 free_netdev(dev);
631 return err;
632}
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666static void dfx_bus_init(struct net_device *dev)
667{
668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev;
670 int dfx_bus_pci = DFX_BUS_PCI(bdev);
671 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
672 int dfx_bus_tc = DFX_BUS_TC(bdev);
673 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
674 u8 val;
675
676 DBG_printk("In dfx_bus_init...\n");
677
678
679 bp->dev = dev;
680
681
682
683 if (dfx_bus_tc)
684 dev->irq = to_tc_dev(bdev)->interrupt;
685 if (dfx_bus_eisa) {
686 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
687
688
689 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
690 val &= PI_CONFIG_STAT_0_M_IRQ;
691 val >>= PI_CONFIG_STAT_0_V_IRQ;
692
693 switch (val) {
694 case PI_CONFIG_STAT_0_IRQ_K_9:
695 dev->irq = 9;
696 break;
697
698 case PI_CONFIG_STAT_0_IRQ_K_10:
699 dev->irq = 10;
700 break;
701
702 case PI_CONFIG_STAT_0_IRQ_K_11:
703 dev->irq = 11;
704 break;
705
706 case PI_CONFIG_STAT_0_IRQ_K_15:
707 dev->irq = 15;
708 break;
709 }
710
711
712
713
714
715
716
717
718
719
720
721
722
723 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
724 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val);
725 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0);
726 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val);
727 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0);
728 val = PI_ESIC_K_CSR_IO_LEN - 1;
729 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff);
730 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff);
731 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff);
732 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff);
733
734
735 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
736 if (dfx_use_mmio)
737 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
738 outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val);
739
740
741
742
743
744 val = PI_SLOT_CNTRL_M_ENB;
745 outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val);
746
747
748
749
750
751 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
752 if (dfx_use_mmio)
753 val |= PI_BURST_HOLDOFF_V_MEM_MAP;
754 else
755 val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
756 outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val);
757
758
759 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
760 val |= PI_CONFIG_STAT_0_M_INT_ENB;
761 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
762 }
763 if (dfx_bus_pci) {
764 struct pci_dev *pdev = to_pci_dev(bdev);
765
766
767
768 dev->irq = pdev->irq;
769
770
771
772 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
773 if (val < PFI_K_LAT_TIMER_MIN) {
774 val = PFI_K_LAT_TIMER_DEF;
775 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
776 }
777
778
779 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
780 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
781 }
782}
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812static void dfx_bus_uninit(struct net_device *dev)
813{
814 DFX_board_t *bp = netdev_priv(dev);
815 struct device *bdev = bp->bus_dev;
816 int dfx_bus_pci = DFX_BUS_PCI(bdev);
817 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
818 u8 val;
819
820 DBG_printk("In dfx_bus_uninit...\n");
821
822
823
824 if (dfx_bus_eisa) {
825 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
826
827
828 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
829 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
830 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
831 }
832 if (dfx_bus_pci) {
833
834 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
835 }
836}
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
/*
 * dfx_bus_config_check - apply workarounds for known-broken hardware
 * @bp: board control structure
 *
 * For DEFEA (EISA) boards matching product ID 2, queries the PDQ chip
 * revision; if the query fails or the revision value is 2, the DMA
 * burst size is capped at 8 longwords and full duplex is forced off.
 * (The revision-2 limitation is presumed to be a hardware erratum of
 * that chip stepping — TODO confirm against the DEFEA errata.)
 */
static void dfx_bus_config_check(DFX_board_t *bp)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int status;				/* return code from port control request */
	u32 host_data;				/* PDQ revision returned by the adapter */

	DBG_printk("In dfx_bus_config_check...\n");

	if (dfx_bus_eisa) {
		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
			/* Ask the adapter for its PDQ revision. */
			status = dfx_hw_port_ctrl_req(bp,
						      PI_PCTRL_M_SUB_CMD,
						      PI_SUB_CMD_K_PDQ_REV_GET,
						      0,
						      &host_data);
			if ((status != DFX_K_SUCCESS) || (host_data == 2))
			{
				/* Cap the burst size at 8 longwords;
				 * smaller settings are left alone. */
				switch (bp->burst_size)
				{
				case PI_PDATA_B_DMA_BURST_SIZE_32:
				case PI_PDATA_B_DMA_BURST_SIZE_16:
					bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
					break;

				default:
					break;
				}

				/* Force full duplex off as well. */
				bp->full_duplex_enb = PI_SNMP_K_FALSE;
			}
		}
	}
}
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
/*
 * dfx_driver_init - one-time driver and adapter initialization
 * @dev: net device of the board
 * @print_name: device name for log messages
 * @bar_start: CSR window start address (for the info banner only)
 *
 * Runs bus setup, establishes default board parameters, resets the
 * adapter, reads the factory MAC address, and carves one coherent DMA
 * block into the descriptor block, command request/response buffers,
 * (optionally static) receive buffers, and the consumer block.
 *
 * Returns DFX_K_SUCCESS or DFX_K_FAILURE.
 */
static int dfx_driver_init(struct net_device *dev, const char *print_name,
			   resource_size_t bar_start)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	int alloc_size;			/* total bytes of the shared DMA block */
	char *top_v, *curr_v;		/* block start / carve-out cursor (virtual) */
	dma_addr_t top_p, curr_p;	/* matching bus addresses */
	u32 data;			/* MAC address halves read from the adapter */
	__le32 le32;
	char *board_name = NULL;

	DBG_printk("In dfx_driver_init...\n");

	/* Initialize bus-specific hardware (IRQ, decoders, enables). */
	dfx_bus_init(dev);

	/* Default board parameters; dfx_bus_config_check() below may
	 * tighten them for known-broken hardware revisions. */
	bp->full_duplex_enb = PI_SNMP_K_FALSE;
	bp->req_ttrt = 8 * 12500;	/* requested TTRT; presumably 80 ns
					 * units, i.e. 8 ms — TODO confirm */
	bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
	bp->rcv_bufs_to_post = RCV_BUFS_DEF;

	/* Apply hardware-revision workarounds. */
	dfx_bus_config_check(bp);

	/* Disable PDQ interrupts first. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Reset the adapter (skip self-test) before talking to it. */
	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/* Read the factory MAC address: low 32 bits first... */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return DFX_K_FAILURE;
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));

	/* ...then the remaining 16 bits. */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return DFX_K_FAILURE;
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));

	/* Start out using the factory address as the current address;
	 * it can be overridden later via the set-MAC entry point. */
	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
	if (dfx_bus_tc)
		board_name = "DEFTA";
	if (dfx_bus_eisa)
		board_name = "DEFEA";
	if (dfx_bus_pci)
		board_name = "DEFPA";
	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
		(long long)bar_start, dev->irq, dev->dev_addr);

	/* Allocate one coherent DMA block for all host-shared structures,
	 * with slack so the descriptor block can be aligned. */
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX +
		     PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
						   &bp->kmalloced_dma,
						   GFP_ATOMIC);
	if (top_v == NULL) {
		printk("%s: Could not allocate memory for host buffers "
		       "and structures!\n", print_name);
		return DFX_K_FAILURE;
	}
	memset(top_v, 0, alloc_size);	/* zero before handing to hardware */
	top_p = bp->kmalloced_dma;	/* bus address of the block */

	/* Align the carve-out cursor for the descriptor block; keep the
	 * virtual cursor in lock-step with the bus-address cursor. */
	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
	curr_v = top_v + (curr_p - top_p);

	/* Descriptor block. */
	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
	bp->descr_block_phys = curr_p;
	curr_v += sizeof(PI_DESCR_BLOCK);
	curr_p += sizeof(PI_DESCR_BLOCK);

	/* Command request buffer. */
	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
	bp->cmd_req_phys = curr_p;
	curr_v += PI_CMD_REQ_K_SIZE_MAX;
	curr_p += PI_CMD_REQ_K_SIZE_MAX;

	/* Command response buffer. */
	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
	bp->cmd_rsp_phys = curr_p;
	curr_v += PI_CMD_RSP_K_SIZE_MAX;
	curr_p += PI_CMD_RSP_K_SIZE_MAX;

	/* Receive buffer area (static only without DYNAMIC_BUFFERS). */
	bp->rcv_block_virt = curr_v;
	bp->rcv_block_phys = curr_p;

#ifndef DYNAMIC_BUFFERS
	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
#endif

	/* Consumer block. */
	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
	bp->cons_block_phys = curr_p;

	/* Debug dump of the carve-out layout. */
	DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
		   print_name,
		   (long)bp->descr_block_virt, bp->descr_block_phys);
	DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
	DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
	DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cons_block_virt, bp->cons_block_phys);

	return DFX_K_SUCCESS;
}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
/*
 * dfx_adap_init - bring the adapter from reset to the running state
 * @bp: board control structure
 * @get_buffers: nonzero to (re)allocate receive buffers; zero to reuse
 *		 the buffers already posted (e.g. on an error-path reset)
 *
 * Performs the PDQ bring-up sequence in the order the hardware
 * requires: reset, clear stale interrupts, set burst size, hand over
 * the consumer and descriptor blocks, program characteristics/SNMP
 * settings, load CAM and filters, post receive buffers, issue START,
 * and finally re-enable interrupts.
 *
 * Returns DFX_K_SUCCESS or DFX_K_FAILURE.
 */
static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
{
	DBG_printk("In dfx_adap_init...\n");

	/* Disable PDQ interrupts while reconfiguring. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Reset the adapter into the DMA-unavailable state. */
	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
	{
		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Acknowledge any Type 0 interrupt status left from the reset so
	 * stale events are not serviced later. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);

	/* Reset the host-side producer/completion index registers. */
	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear the consumer block before the adapter starts DMA to it. */
	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Set the DMA burst size chosen in dfx_driver_init (possibly
	 * capped by dfx_bus_config_check). */
	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_SUB_CMD,
				 PI_SUB_CMD_K_BURST_SIZE_SET,
				 bp->burst_size,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Tell the adapter where the consumer block lives. */
	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_CONS_BLOCK,
				 bp->cons_block_phys,
				 0,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set consumer block address!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Initialize the adapter with the descriptor block address; the
	 * BSWAP flag selects the byte-swap convention for DMA (presumed
	 * little-endian init mode — TODO confirm against the PDQ spec). */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
				 (u32)(bp->descr_block_phys |
				       PI_PDATA_A_INIT_M_BSWAP_INIT),
				 0, NULL) != DFX_K_SUCCESS) {
		printk("%s: Could not set descriptor block address!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set the transmit flush timeout characteristic (value 3; units
	 * defined by the adapter firmware — TODO confirm). */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
	bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
	bp->cmd_req_virt->char_set.item[0].value = 3;
	bp->cmd_req_virt->char_set.item[0].item_index = 0;
	bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set SNMP parameters: full-duplex enable and requested TTRT. */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
	bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
	bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
	bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
	bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
	bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Load the address CAM. */
	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Load the receive filters. */
	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter filters update failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* When (re)allocating buffers, drop any previously posted ones
	 * first so dfx_rcv_init starts from a clean slate. */
	if (get_buffers)
		dfx_rcv_flush(bp);

	/* Initialize the receive descriptor block and post buffers. */
	if (dfx_rcv_init(bp, get_buffers))
	{
		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return DFX_K_FAILURE;
	}

	/* Issue the START command to bring the adapter on line. */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Start command failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return DFX_K_FAILURE;
	}

	/* Initialization succeeded: re-enable the default interrupt set. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
	return DFX_K_SUCCESS;
}
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
/*
 * dfx_open - net_device open entry point
 * @dev: net device being opened
 *
 * Registers the (shared) interrupt handler, resets the address and
 * filter state to defaults, initializes the adapter and starts the
 * transmit queue.
 *
 * Returns 0 on success, the request_irq() error, or -EAGAIN when the
 * adapter itself fails to initialize.
 */
static int dfx_open(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	int ret;

	DBG_printk("In dfx_open...\n");

	/* Register the IRQ; shared because the line may be shared with
	 * other devices on PCI/EISA. */
	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
			  dev);
	if (ret) {
		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
		return ret;
	}

	/* Begin each open with the factory MAC address; a previously set
	 * override does not persist across close/open. */
	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);

	/* Clear the local unicast and multicast address tables. */
	memset(bp->uc_table, 0, sizeof(bp->uc_table));
	memset(bp->mc_table, 0, sizeof(bp->mc_table));
	bp->uc_count = 0;
	bp->mc_count = 0;

	/* Default the promiscuous filter states to blocking. */
	bp->ind_group_prom = PI_FSTATE_K_BLOCK;
	bp->group_prom = PI_FSTATE_K_BLOCK;

	spin_lock_init(&bp->lock);

	/* Bring the adapter up, skipping the self-test, and allocate
	 * fresh receive buffers (get_buffers = 1). */
	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;
	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
	{
		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allow transmits to be queued. */
	netif_start_queue(dev);
	return 0;
}
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/*
 * dfx_close - net_device stop entry point
 * @dev: net device being closed
 *
 * Disables adapter interrupts, resets the adapter, flushes queued
 * transmit and receive buffers, clears host-side ring state, stops the
 * queue and releases the IRQ.  Always returns 0.
 */
static int dfx_close(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);

	DBG_printk("In dfx_close...\n");

	/* Disable PDQ interrupts before tearing anything down. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Reset the adapter (skip self-test); this also stops DMA. */
	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/* Free any transmit packets still queued on the ring. */
	dfx_xmt_flush(bp);

	/* Reset the host copies of the ring producer/completion indices
	 * so a subsequent open starts from a clean state. */
	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear the consumer block. */
	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Release receive buffers. */
	dfx_rcv_flush(bp);

	/* Stop the transmit queue. */
	netif_stop_queue(dev);

	/* Release the interrupt line. */
	free_irq(dev->irq, dev);

	return 0;
}
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559static void dfx_int_pr_halt_id(DFX_board_t *bp)
1560 {
1561 PI_UINT32 port_status;
1562 PI_UINT32 halt_id;
1563
1564
1565
1566 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1567
1568
1569
1570 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1571 switch (halt_id)
1572 {
1573 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1574 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1575 break;
1576
1577 case PI_HALT_ID_K_PARITY_ERROR:
1578 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1579 break;
1580
1581 case PI_HALT_ID_K_HOST_DIR_HALT:
1582 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1583 break;
1584
1585 case PI_HALT_ID_K_SW_FAULT:
1586 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1587 break;
1588
1589 case PI_HALT_ID_K_HW_FAULT:
1590 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1591 break;
1592
1593 case PI_HALT_ID_K_PC_TRACE:
1594 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1595 break;
1596
1597 case PI_HALT_ID_K_DMA_ERROR:
1598 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1599 break;
1600
1601 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1602 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1603 break;
1604
1605 case PI_HALT_ID_K_BUS_EXCEPTION:
1606 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1607 break;
1608
1609 default:
1610 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1611 break;
1612 }
1613 }
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
/*
 * dfx_int_type_0_process - service Type 0 (error/state-change) interrupts
 * @bp: board control structure
 *
 * Reads and acknowledges the pending Type 0 status bits, then:
 *  - on fatal memory/parity errors, logs them and reinitializes the
 *    adapter (disabling interrupts permanently if that fails);
 *  - on a transmit-flush request, flushes queued transmits and
 *    acknowledges the flush to the adapter;
 *  - on a state change, reinitializes on HALTED or marks the link
 *    available on LINK_AVAIL.
 */
static void dfx_int_type_0_process(DFX_board_t *bp)

{
	PI_UINT32 type_0_status;	/* pending Type 0 bits */
	PI_UINT32 state;		/* current adapter state */

	/* Read the pending bits and write them back, which acknowledges
	 * them (presumably write-one-to-clear — TODO confirm with the
	 * PDQ register spec). */
	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);

	/* Fatal memory/parity errors: log each one, then reset. */
	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
			     PI_TYPE_0_STAT_M_PM_PAR_ERR |
			     PI_TYPE_0_STAT_M_BUS_PAR_ERR))
	{
		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);

		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
			printk("%s: Packet Memory Parity Error\n", bp->dev->name);

		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
			printk("%s: Host Bus Parity Error\n", bp->dev->name);

		/* Take the link down and reinitialize, reusing the
		 * already-posted receive buffers (get_buffers = 0). */
		bp->link_available = PI_K_FALSE;
		bp->reset_type = 0;
		printk("%s: Resetting adapter...\n", bp->dev->name);
		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
		{
			printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
			return;
		}
		printk("%s: Adapter reset successful!\n", bp->dev->name);
		return;
	}

	/* Transmit-flush request: drop queued transmits and tell the
	 * adapter the flush is complete. */
	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
	{
		bp->link_available = PI_K_FALSE;
		dfx_xmt_flush(bp);
		(void) dfx_hw_port_ctrl_req(bp,
					    PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
					    0,
					    0,
					    NULL);
	}

	/* Adapter state change. */
	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
	{
		state = dfx_hw_adap_state_rd(bp);
		if (state == PI_STATE_K_HALTED)
		{
			/* Log why the adapter halted, then reset it just
			 * like the fatal-error path above. */
			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
			dfx_int_pr_halt_id(bp);

			bp->link_available = PI_K_FALSE;
			bp->reset_type = 0;
			printk("%s: Resetting adapter...\n", bp->dev->name);
			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
			{
				printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
				return;
			}
			printk("%s: Adapter reset successful!\n", bp->dev->name);
		}
		else if (state == PI_STATE_K_LINK_AVAIL)
		{
			bp->link_available = PI_K_TRUE;
		}
	}
}
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
/*
 * dfx_int_common - bus-independent interrupt service work
 * @bp's device: @dev - net device that interrupted
 *
 * Reaps completed transmits (waking the queue if any were freed),
 * processes received frames, pushes the updated transmit/receive
 * producer indices to the adapter in a single register write, and then
 * services any pending Type 0 (error/state-change) interrupt.
 * Called with bp->lock held by dfx_interrupt().
 */
static void dfx_int_common(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	PI_UINT32 port_status;

	/* Reap completed transmit packets; restart the queue if any
	 * descriptors were freed. */
	if (dfx_xmt_done(bp))
		netif_wake_queue(dev);

	/* Service received frames. */
	dfx_rcv_queue_process(bp);

	/* The two calls above updated the host copies of the transmit and
	 * receive producer indices; hand them to the adapter in one
	 * Type 2 producer register write. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);

	/* Check whether a Type 0 interrupt also needs servicing. */
	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
		dfx_int_type_0_process(bp);
}
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1879{
1880 struct net_device *dev = dev_id;
1881 DFX_board_t *bp = netdev_priv(dev);
1882 struct device *bdev = bp->bus_dev;
1883 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1884 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1885 int dfx_bus_tc = DFX_BUS_TC(bdev);
1886
1887
1888
1889 if (dfx_bus_pci) {
1890 u32 status;
1891
1892 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1893 if (!(status & PFI_STATUS_M_PDQ_INT))
1894 return IRQ_NONE;
1895
1896 spin_lock(&bp->lock);
1897
1898
1899 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1900 PFI_MODE_M_DMA_ENB);
1901
1902
1903 dfx_int_common(dev);
1904
1905
1906 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1907 PFI_STATUS_M_PDQ_INT);
1908 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1909 (PFI_MODE_M_PDQ_INT_ENB |
1910 PFI_MODE_M_DMA_ENB));
1911
1912 spin_unlock(&bp->lock);
1913 }
1914 if (dfx_bus_eisa) {
1915 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1916 u8 status;
1917
1918 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1919 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1920 return IRQ_NONE;
1921
1922 spin_lock(&bp->lock);
1923
1924
1925 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1926 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1927
1928
1929 dfx_int_common(dev);
1930
1931
1932 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1933 status |= PI_CONFIG_STAT_0_M_INT_ENB;
1934 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1935
1936 spin_unlock(&bp->lock);
1937 }
1938 if (dfx_bus_tc) {
1939 u32 status;
1940
1941 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1942 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1943 PI_PSTATUS_M_XMT_DATA_PENDING |
1944 PI_PSTATUS_M_SMT_HOST_PENDING |
1945 PI_PSTATUS_M_UNSOL_PENDING |
1946 PI_PSTATUS_M_CMD_RSP_PENDING |
1947 PI_PSTATUS_M_CMD_REQ_PENDING |
1948 PI_PSTATUS_M_TYPE_0_PENDING)))
1949 return IRQ_NONE;
1950
1951 spin_lock(&bp->lock);
1952
1953
1954 dfx_int_common(dev);
1955
1956 spin_unlock(&bp->lock);
1957 }
1958
1959 return IRQ_HANDLED;
1960}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
/*
 * dfx_ctl_get_stats - gather interface statistics (.ndo_get_stats)
 * @dev: device to query
 *
 * Fills the generic counters from driver-maintained totals, then
 * issues two adapter commands - SMT MIB GET and CNTRS GET - and
 * copies the results into bp->stats.  If either command fails, the
 * statistics gathered so far are returned.
 *
 * NOTE(review): no locking is visible here around the shared command
 * request/response buffers - presumably serialized with other command
 * users by the caller; confirm against the rest of the driver.
 */
static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
	{
	DFX_board_t *bp = netdev_priv(dev);

	/* Fill the bp->stats structure with driver-maintained counters */

	bp->stats.gen.rx_packets = bp->rcv_total_frames;
	bp->stats.gen.tx_packets = bp->xmt_total_frames;
	bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
	bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
	bp->stats.gen.rx_errors = bp->rcv_crc_errors +
				  bp->rcv_frame_status_errors +
				  bp->rcv_length_errors;
	bp->stats.gen.tx_errors = bp->xmt_length_errors;
	bp->stats.gen.rx_dropped = bp->rcv_discards;
	bp->stats.gen.tx_dropped = bp->xmt_discards;
	bp->stats.gen.multicast = bp->rcv_multicast_frames;
	bp->stats.gen.collisions = 0;		/* never updated by this driver */

	/* Get FDDI SMT MIB objects; on failure return what we have so far */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return (struct net_device_stats *)&bp->stats;

	/* Copy the SMT MIB objects from the command response into bp->stats */

	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
	bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
	bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
	bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
	bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
	bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
	bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
	bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
	bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
	bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
	bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
	bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
	bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
	bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
	bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
	bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
	bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
	bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
	bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
	bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
	bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
	/* only the low word (.ls) of the 64-bit timestamps is reported */
	bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
	bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
	bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
	bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
	bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
	bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
	bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
	bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
	bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
	bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
	bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
	bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
	bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
	bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
	bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
	bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
	bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
	bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
	bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
	bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
	bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
	bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
	bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
	bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
	bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
	bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
	/* PORT objects are reported for both PHY ports ([0] and [1]) */
	bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
	bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
	bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
	bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
	bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
	bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
	bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
	bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
	bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
	bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
	/* requested paths are 3 bytes per port, stored flat */
	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
	bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
	bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
	bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
	bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
	bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
	bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
	bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
	bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
	bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
	bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
	bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
	bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
	bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
	bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
	bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
	bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
	bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
	bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
	bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
	bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
	bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
	bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
	bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
	bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
	bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
	bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];

	/* Get adapter counters; on failure return what we have so far */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return (struct net_device_stats *)&bp->stats;

	/* Copy the counters (low 32 bits only) into bp->stats */

	bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
	bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
	bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
	bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
	bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
	bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
	bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
	bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
	bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
	bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
	bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;

	return (struct net_device_stats *)&bp->stats;
	}
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193static void dfx_ctl_set_multicast_list(struct net_device *dev)
2194{
2195 DFX_board_t *bp = netdev_priv(dev);
2196 int i;
2197 struct netdev_hw_addr *ha;
2198
2199
2200
2201 if (dev->flags & IFF_PROMISC)
2202 bp->ind_group_prom = PI_FSTATE_K_PASS;
2203
2204
2205
2206 else
2207 {
2208 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2230 {
2231 bp->group_prom = PI_FSTATE_K_PASS;
2232 bp->mc_count = 0;
2233 }
2234 else
2235 {
2236 bp->group_prom = PI_FSTATE_K_BLOCK;
2237 bp->mc_count = netdev_mc_count(dev);
2238 }
2239
2240
2241
2242 i = 0;
2243 netdev_for_each_mc_addr(ha, dev)
2244 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2245 ha->addr, FDDI_K_ALEN);
2246
2247 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2248 {
2249 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2250 }
2251 else
2252 {
2253 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2254 }
2255 }
2256
2257
2258
2259 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2260 {
2261 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2262 }
2263 else
2264 {
2265 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2266 }
2267 }
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2307 {
2308 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2309 DFX_board_t *bp = netdev_priv(dev);
2310
2311
2312
2313 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
2314 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);
2315 bp->uc_count = 1;
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2330 {
2331 bp->group_prom = PI_FSTATE_K_PASS;
2332 bp->mc_count = 0;
2333
2334
2335
2336 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2337 {
2338 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2339 }
2340 else
2341 {
2342 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2343 }
2344 }
2345
2346
2347
2348 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2349 {
2350 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2351 }
2352 else
2353 {
2354 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2355 }
2356 return 0;
2357 }
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393static int dfx_ctl_update_cam(DFX_board_t *bp)
2394 {
2395 int i;
2396 PI_LAN_ADDR *p_addr;
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);
2412 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2413 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2414
2415
2416
2417 for (i=0; i < (int)bp->uc_count; i++)
2418 {
2419 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2420 {
2421 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2422 p_addr++;
2423 }
2424 }
2425
2426
2427
2428 for (i=0; i < (int)bp->mc_count; i++)
2429 {
2430 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2431 {
2432 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2433 p_addr++;
2434 }
2435 }
2436
2437
2438
2439 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2440 return DFX_K_FAILURE;
2441 return DFX_K_SUCCESS;
2442 }
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476static int dfx_ctl_update_filters(DFX_board_t *bp)
2477 {
2478 int i = 0;
2479
2480
2481
2482 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2483
2484
2485
2486 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2487 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2488
2489
2490
2491 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2492 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2493
2494
2495
2496 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2497 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2498
2499
2500
2501 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2502
2503
2504
2505 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2506 return DFX_K_FAILURE;
2507 return DFX_K_SUCCESS;
2508 }
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
/*
 * dfx_hw_dma_cmd_req - issue a DMA command request to the adapter
 * @bp: board private data
 *
 * Posts a response buffer on the command response ring, posts the
 * request (already built in bp->cmd_req_virt) on the command request
 * ring, then busy-waits for the adapter to consume the request and
 * produce the response (up to ~2 s each at 100 us per poll).
 *
 * Returns DFX_K_SUCCESS, DFX_K_OUTSTATE (adapter cannot accept
 * commands in its current state), or DFX_K_HW_TIMEOUT.
 */
static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
	{
	int status;		/* adapter state from dfx_hw_adap_state_rd() */
	int timeout_cnt;	/* busy-wait loop counter */

	/* Make sure the adapter is in a state that can accept DMA commands */

	status = dfx_hw_adap_state_rd(bp);
	if ((status == PI_STATE_K_RESET) ||
	    (status == PI_STATE_K_HALTED) ||
	    (status == PI_STATE_K_DMA_UNAVAIL) ||
	    (status == PI_STATE_K_UPGRADE))
		return DFX_K_OUTSTATE;

	/* Put the response buffer on the command response queue */

	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;

	/* Bump the response producer (power-of-two ring) and notify the adapter */

	bp->cmd_rsp_reg.index.prod += 1;
	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);

	/* Put the request buffer on the command request queue */

	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;

	/* Bump the request producer and notify the adapter */

	bp->cmd_req_reg.index.prod += 1;
	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/*
	 * Wait for the adapter to consume the request: poll the shared
	 * consumer block until its request consumer index matches our
	 * producer (20000 x 100 us = 2 s worst case).
	 */

	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return DFX_K_HW_TIMEOUT;

	/* Bump the request completion index and rewrite the producer register */

	bp->cmd_req_reg.index.comp += 1;
	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/* Wait for the adapter to produce the response (same 2 s bound) */

	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return DFX_K_HW_TIMEOUT;

	/* Bump the response completion index and rewrite the producer register */

	bp->cmd_rsp_reg.index.comp += 1;
	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
	return DFX_K_SUCCESS;
	}
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
/*
 * dfx_hw_port_ctrl_req - issue a PORT control command to the adapter
 * @bp:        board private data
 * @command:   PI_PCTRL_M_* command code
 * @data_a:    value for the PORT_DATA_A register
 * @data_b:    value for the PORT_DATA_B register
 * @host_data: if non-NULL, receives the HOST_DATA register contents
 *             after the command completes
 *
 * Writes the data registers and the command word (with the CMD_ERROR
 * bit set), then polls until the adapter clears CMD_ERROR to signal
 * completion.  Returns DFX_K_SUCCESS or DFX_K_HW_TIMEOUT.
 */
static int dfx_hw_port_ctrl_req(
	DFX_board_t *bp,
	PI_UINT32	command,
	PI_UINT32	data_a,
	PI_UINT32	data_b,
	PI_UINT32	*host_data
	)

	{
	PI_UINT32 port_cmd;	/* Port Control register image */
	int timeout_cnt;	/* busy-wait loop counter */

	/* Set CMD_ERROR along with the command; the adapter clears it when done */

	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);

	/* Issue the command to the adapter */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);

	/* Flash programming gets a much longer timeout (60 s vs. 2 s) */

	if (command == PI_PCTRL_M_BLAST_FLASH)
		timeout_cnt = 600000;
	else
		timeout_cnt = 20000;

	for (; timeout_cnt > 0; timeout_cnt--)
		{
		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return DFX_K_HW_TIMEOUT;

	/*
	 * Return HOST_DATA to the caller if requested.
	 * NOTE(review): CMD_ERROR clearing is treated purely as a
	 * completion indication here; command failure is not checked -
	 * confirm against the PDQ Port Specification.
	 */

	if (host_data != NULL)
		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
	return DFX_K_SUCCESS;
	}
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749static void dfx_hw_adap_reset(
2750 DFX_board_t *bp,
2751 PI_UINT32 type
2752 )
2753
2754 {
2755
2756
2757 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);
2758 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2759
2760
2761
2762 udelay(20);
2763
2764
2765
2766 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2767 }
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2798 {
2799 PI_UINT32 port_status;
2800
2801 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2802 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2803 }
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2838 {
2839 int timeout_cnt;
2840
2841
2842
2843 dfx_hw_adap_reset(bp, type);
2844
2845
2846
2847 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2848 {
2849 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2850 break;
2851 udelay(100);
2852 }
2853 if (timeout_cnt == 0)
2854 return DFX_K_HW_TIMEOUT;
2855 return DFX_K_SUCCESS;
2856 }
2857
2858
2859
2860
2861
2862
2863static void my_skb_align(struct sk_buff *skb, int n)
2864{
2865 unsigned long x = (unsigned long)skb->data;
2866 unsigned long v;
2867
2868 v = ALIGN(x, n);
2869
2870 skb_reserve(skb, v - x);
2871}
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
/*
 * dfx_rcv_init - populate and post the receive descriptor ring
 * @bp:          board private data
 * @get_buffers: nonzero to allocate (and DMA-map) fresh receive
 *               buffers; zero to only re-post the producer index
 *
 * Returns 0 on success, -ENOMEM if an skb allocation fails.
 *
 * Ring entries are filled so that entry i+j uses buffer row i, with j
 * stepping by rcv_bufs_to_post: only rcv_bufs_to_post distinct buffers
 * back the whole ring.
 */
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
	{
	int	i, j;		/* buffer row index / ring stride offset */

	if (get_buffers) {
#ifdef DYNAMIC_BUFFERS
	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			/* GFP_NOIO - presumably callable from a path where
			 * I/O recursion must be avoided; confirm callers */
			struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
			if (!newskb)
				return -ENOMEM;
			/* SOP descriptor; segment length is in units of
			 * PI_ALIGN_K_RCV_DATA_BUFF */
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));

			/* align the buffer to a 128-byte boundary before mapping */
			my_skb_align(newskb, 128);
			bp->descr_block_virt->rcv_data[i + j].long_1 =
				(u32)dma_map_single(bp->bus_dev, newskb->data,
						    NEW_SKB_SIZE,
						    DMA_FROM_DEVICE);

			/* remember the skb so it can be swapped/unmapped/freed later */
			bp->p_rcv_buff_va[i+j] = (char *) newskb;
		}
#else
	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			/* static buffers: fixed slices of the coherent receive block */
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
			bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
		}
#endif
	}

	/* Hand the posted buffers to the adapter by advancing the producer */

	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	return 0;
	}
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
/*
 * dfx_rcv_queue_process - service completed receive ring entries
 * @bp: board private data
 *
 * Called from dfx_int_common() with bp->lock held.  Walks the receive
 * ring from our completion index up to the adapter's consumer index,
 * validates each frame, hands good frames to the stack via netif_rx(),
 * and re-posts the buffer.  The updated producer/completion register
 * image is written back to the adapter by the caller.
 */
static void dfx_rcv_queue_process(
	DFX_board_t *bp
	)

	{
	PI_TYPE_2_CONSUMER	*p_type_2_cons;	/* adapter's consumer block entry */
	char			*p_buff;	/* receive buffer being examined */
	u32			descr, pkt_len;	/* FMC descriptor and frame length */
	struct sk_buff		*skb;		/* skb handed up (NULL on alloc failure) */

	/* Service each completed entry until we catch up with the adapter */

	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
		{
		int entry;	/* ring slot being serviced */

		entry = bp->rcv_xmt_reg.index.rcv_comp;
#ifdef DYNAMIC_BUFFERS
		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
		p_buff = bp->p_rcv_buff_va[entry];
#endif
		/* The adapter writes the FMC descriptor at the head of the buffer */
		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));

		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
			{
			/* frame was flushed on the ring: classify the error */
			if (descr & PI_FMC_DESCR_M_RCC_CRC)
				bp->rcv_crc_errors++;
			else
				bp->rcv_frame_status_errors++;
			}
		else
			{
			int rx_in_place = 0;	/* nonzero: skb swapped, no copy needed */

			/* Frame length from the descriptor, minus the 4-byte CRC */

			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
			pkt_len -= 4;
			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
				bp->rcv_length_errors++;
			else{
#ifdef DYNAMIC_BUFFERS
				/*
				 * Large frames: hand the posted skb to the
				 * stack and post a fresh one, avoiding a copy.
				 */
				if (pkt_len > SKBUFF_RX_COPYBREAK) {
					struct sk_buff *newskb;

					newskb = dev_alloc_skb(NEW_SKB_SIZE);
					if (newskb){
						rx_in_place = 1;
						/* adapter needs 128-byte aligned buffers */
						my_skb_align(newskb, 128);
						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
						dma_unmap_single(bp->bus_dev,
							bp->descr_block_virt->rcv_data[entry].long_1,
							NEW_SKB_SIZE,
							DMA_FROM_DEVICE);
						skb_reserve(skb, RCV_BUFF_K_PADDING);
						bp->p_rcv_buff_va[entry] = (char *)newskb;
						bp->descr_block_virt->rcv_data[entry].long_1 =
							(u32)dma_map_single(bp->bus_dev,
								newskb->data,
								NEW_SKB_SIZE,
								DMA_FROM_DEVICE);
					} else
						skb = NULL;
				} else
#endif
					/* small frames (or static buffers): allocate and copy */
					skb = dev_alloc_skb(pkt_len+3);
				if (skb == NULL)
					{
					printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
					bp->rcv_discards++;
					break;
					}
				else {
#ifndef DYNAMIC_BUFFERS
					if (! rx_in_place)
#endif
						{
						/* Copy data including the 3 leading padding bytes */
						skb_copy_to_linear_data(skb,
							p_buff + RCV_BUFF_K_PADDING,
							pkt_len + 3);
						}

					skb_reserve(skb,3);	/* point data at the FC byte */
					skb_put(skb, pkt_len);	/* length excludes CRC */
					skb->protocol = fddi_type_trans(skb, bp->dev);
					bp->rcv_total_bytes += skb->len;
					netif_rx(skb);

					/* Update counters; low DA bit set means multicast */
					bp->rcv_total_frames++;
					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
						bp->rcv_multicast_frames++;
				}
			}
			}

		/*
		 * Re-post the buffer: producer and completion advance in
		 * lockstep (one buffer posted for every one consumed).
		 * The caller pushes the updated register image to the
		 * adapter after the loop.
		 */

		bp->rcv_xmt_reg.index.rcv_prod += 1;
		bp->rcv_xmt_reg.index.rcv_comp += 1;
		}
	}
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
/*
 * dfx_xmt_queue_pkt - queue a frame for transmission (.ndo_start_xmit)
 * @skb: LLC frame to send (packet request header not yet present)
 * @dev: outgoing device
 *
 * Validates the frame length and link state, prepends the 3-byte
 * packet request header, maps the frame for DMA, fills the next
 * transmit descriptor, and advances the producer index.
 *
 * Returns NETDEV_TX_OK when the frame was consumed (sent or dropped)
 * or NETDEV_TX_BUSY when the ring is full (frame left intact so the
 * core requeues it).
 */
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev)
	{
	DFX_board_t		*bp = netdev_priv(dev);
	u8			prod;			/* local transmit producer index */
	PI_XMT_DESCR		*p_xmt_descr;		/* hardware descriptor to fill */
	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* driver shadow descriptor (keeps skb) */
	unsigned long		flags;

	netif_stop_queue(dev);

	/*
	 * Drop frames whose LLC length is out of range; the hardware
	 * cannot transmit them.  Frame is freed, queue restarted.
	 */

	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
		{
		printk("%s: Invalid packet length - %u bytes\n",
			dev->name, skb->len);
		bp->xmt_length_errors++;
		netif_wake_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
		}

	/*
	 * Discard frames while the link is down.  The cached
	 * link_available flag is refreshed from the adapter state on
	 * each attempt so transmission resumes once the link comes up.
	 */

	if (bp->link_available == PI_K_FALSE)
		{
		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)
			bp->link_available = PI_K_TRUE;
		else
			{
			bp->xmt_discards++;
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
			}
		}

	spin_lock_irqsave(&bp->lock, flags);

	/* Get the current producer index and its hardware descriptor */

	prod = bp->rcv_xmt_reg.index.xmt_prod;
	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);

	/*
	 * The driver descriptor at the same index shadows this ring
	 * entry; prod is post-incremented to the value the producer
	 * will take once this frame is accepted.
	 */

	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);

	/* Prepend the 3-byte packet request header (PRH) */

	skb_push(skb,3);
	skb->data[0] = DFX_PRH0_BYTE;
	skb->data[1] = DFX_PRH1_BYTE;
	skb->data[2] = DFX_PRH2_BYTE;

	/*
	 * Build the descriptor: single-segment frame (SOP|EOP) with the
	 * buffer mapped for device reads.
	 * NOTE(review): the dma_map_single() result is not checked for
	 * mapping errors, and the mapping is not undone on the
	 * ring-full path below - confirm against the DMA API rules.
	 */

	p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
	p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
						  skb->len, DMA_TO_DEVICE);

	/*
	 * If advancing the producer would collide with the completion
	 * index, the ring is full: undo the PRH push and let the core
	 * requeue the frame.
	 */

	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
		{
		skb_pull(skb,3);
		spin_unlock_irqrestore(&bp->lock, flags);
		return NETDEV_TX_BUSY;
		}

	/* Remember the skb so dfx_xmt_done() can unmap and free it */

	p_xmt_drv_descr->p_skb = skb;

	/* Publish the new producer index to the adapter */

	bp->rcv_xmt_reg.index.xmt_prod = prod;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	spin_unlock_irqrestore(&bp->lock, flags);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
	}
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377static int dfx_xmt_done(DFX_board_t *bp)
3378 {
3379 XMT_DRIVER_DESCR *p_xmt_drv_descr;
3380 PI_TYPE_2_CONSUMER *p_type_2_cons;
3381 u8 comp;
3382 int freed = 0;
3383
3384
3385
3386 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3387 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3388 {
3389
3390
3391 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3392
3393
3394
3395 bp->xmt_total_frames++;
3396 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3397
3398
3399 comp = bp->rcv_xmt_reg.index.xmt_comp;
3400 dma_unmap_single(bp->bus_dev,
3401 bp->descr_block_virt->xmt_data[comp].long_1,
3402 p_xmt_drv_descr->p_skb->len,
3403 DMA_TO_DEVICE);
3404 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417 bp->rcv_xmt_reg.index.xmt_comp += 1;
3418 freed++;
3419 }
3420 return freed;
3421 }
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450#ifdef DYNAMIC_BUFFERS
3451static void dfx_rcv_flush( DFX_board_t *bp )
3452 {
3453 int i, j;
3454
3455 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3456 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3457 {
3458 struct sk_buff *skb;
3459 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3460 if (skb)
3461 dev_kfree_skb(skb);
3462 bp->p_rcv_buff_va[i+j] = NULL;
3463 }
3464
3465 }
3466#else
3467static inline void dfx_rcv_flush( DFX_board_t *bp )
3468{
3469}
3470#endif
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
/*
 * dfx_xmt_flush - discard every frame still queued on the transmit ring
 * @bp: board private data
 *
 * Used when the adapter is being shut down or reset: every frame
 * between the completion and producer indices is unmapped, freed, and
 * counted as a discard.  Finally the consumer block's transmit index
 * is forced equal to our producer so both sides agree the ring is
 * empty.
 */
static void dfx_xmt_flush( DFX_board_t *bp )
	{
	u32			prod_cons;		/* rebuilt consumer block longword */
	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* shadow descriptor holding the skb */
	u8			comp;			/* completion index of entry being freed */

	/* Free all the skbs still queued on the transmit ring */

	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
		{
		/* Get the driver descriptor for the next outstanding frame */
		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Tear down the DMA mapping before freeing the skb */
		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(p_xmt_drv_descr->p_skb);

		/* These frames were never transmitted; count them as discards */

		bp->xmt_discards++;

		/* Advance the completion index (the field wraps the ring) */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
		}

	/* Force the consumer block's transmit index to match our producer */

	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
	bp->cons_block_virt->xmt_rcv_data = prod_cons;
	}
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
/*
 * dfx_unregister - tear down one adapter instance.
 * @bdev: generic device of the board being removed.
 *
 * Reverses the registration sequence: detach the net device, free the
 * coherent descriptor/consumer DMA block, shut down the bus interface,
 * release the I/O or MMIO resource, drop the PCI enable (PCI boards
 * only) and finally free the net device.  The statement order matters;
 * do not reorder.
 */
static void dfx_unregister(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	DFX_board_t *bp = netdev_priv(dev);
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;	/* TC boards always use MMIO */
	resource_size_t bar_start = 0;
	resource_size_t bar_len = 0;
	int alloc_size;					/* total coherent block size */

	/* Detach from the network stack first so no new I/O can arrive. */
	unregister_netdev(dev);

	/* Recompute the size used when the coherent block was allocated;
	 * this expression must stay in sync with the allocation site.
	 */
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

	dfx_bus_uninit(dev);

	/* Release whichever resource flavor was claimed at probe time. */
	dfx_get_bars(bdev, &bar_start, &bar_len);
	if (dfx_use_mmio) {
		iounmap(bp->base.mem);
		release_mem_region(bar_start, bar_len);
	} else
		release_region(bar_start, bar_len);

	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

	free_netdev(dev);
}
3619
3620
/* Bus-generic probe/remove helpers shared by the EISA and TC glue below. */
static int __maybe_unused dfx_dev_register(struct device *);
static int __maybe_unused dfx_dev_unregister(struct device *);
3623
#ifdef CONFIG_PCI
static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
static void dfx_pci_unregister(struct pci_dev *);

/* DEFPA: PCI IDs this driver claims.  DEFINE_PCI_DEVICE_TABLE is a
 * deprecated macro (since removed from the kernel); use the canonical
 * const struct pci_device_id array form instead.
 */
static const struct pci_device_id dfx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
	.name		= "defxx",
	.id_table	= dfx_pci_table,
	.probe		= dfx_pci_register,
	.remove		= dfx_pci_unregister,
};

/* Thin PCI probe wrapper: hand off to the bus-independent registration. */
static int dfx_pci_register(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	return dfx_register(&pdev->dev);
}

/* Thin PCI remove wrapper: hand off to the bus-independent teardown. */
static void dfx_pci_unregister(struct pci_dev *pdev)
{
	dfx_unregister(&pdev->dev);
}
#endif
3652
#ifdef CONFIG_EISA
/* DEFEA: EISA board IDs this driver claims (four product revisions). */
static struct eisa_device_id dfx_eisa_table[] = {
	{ "DEC3001", DEFEA_PROD_ID_1 },
	{ "DEC3002", DEFEA_PROD_ID_2 },
	{ "DEC3003", DEFEA_PROD_ID_3 },
	{ "DEC3004", DEFEA_PROD_ID_4 },
	{ }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

static struct eisa_driver dfx_eisa_driver = {
	.id_table = dfx_eisa_table,
	.driver	  = {
		.name = "defxx",
		.bus  = &eisa_bus_type,
		/* Probe/remove via the bus-generic struct device helpers. */
		.probe	  = dfx_dev_register,
		.remove	  = dfx_dev_unregister,
	},
};
#endif
3673
#ifdef CONFIG_TC
/* DEFTA: TURBOchannel module names this driver claims (trailing-space
 * padded to the fixed-width ROM name fields).
 */
static struct tc_device_id const dfx_tc_table[] = {
	{ "DEC     ", "PMAF-FA " },
	{ "DEC     ", "PMAF-FD " },
	{ "DEC     ", "PMAF-FS " },
	{ "DEC     ", "PMAF-FU " },
	{ }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

static struct tc_driver dfx_tc_driver = {
	.id_table = dfx_tc_table,
	.driver	  = {
		.name = "defxx",
		.bus  = &tc_bus_type,
		/* Probe/remove via the bus-generic struct device helpers. */
		.probe	  = dfx_dev_register,
		.remove	  = dfx_dev_unregister,
	},
};
#endif
3694
3695static int __maybe_unused dfx_dev_register(struct device *dev)
3696{
3697 int status;
3698
3699 status = dfx_register(dev);
3700 if (!status)
3701 get_device(dev);
3702 return status;
3703}
3704
/*
 * dfx_dev_unregister - bus-generic remove callback (EISA/TC).
 * @dev: generic device of the board being removed.
 *
 * Drops the reference taken in dfx_dev_register() and tears the
 * adapter down.  Always returns 0.
 */
static int __maybe_unused dfx_dev_unregister(struct device *dev)
{
	/* NOTE(review): the reference is dropped before dfx_unregister()
	 * still uses @dev; presumably the bus core holds its own reference
	 * across the remove callback — confirm before reordering.
	 */
	put_device(dev);
	dfx_unregister(dev);
	return 0;
}
3711
3712
3713static int dfx_init(void)
3714{
3715 int status;
3716
3717 status = pci_register_driver(&dfx_pci_driver);
3718 if (!status)
3719 status = eisa_driver_register(&dfx_eisa_driver);
3720 if (!status)
3721 status = tc_register_driver(&dfx_tc_driver);
3722 return status;
3723}
3724
/*
 * dfx_cleanup - module exit point.
 *
 * Unregisters the driver from each bus type in the reverse of the
 * registration order used in dfx_init().
 */
static void dfx_cleanup(void)
{
	tc_unregister_driver(&dfx_tc_driver);
	eisa_driver_unregister(&dfx_eisa_driver);
	pci_unregister_driver(&dfx_pci_driver);
}
3731
/* Module entry/exit wiring and descriptive metadata. */
module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
		   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");
3738