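/*
 * defxx.c - driver for the DEC PDQ-based FDDI controllers
 *	     DEFTA (TURBOchannel), DEFEA (EISA) and DEFPA (PCI).
 *
 * Based on work by Lawrence V. Stefani and others; see the version
 * string below.
 */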
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>

#include <asm/byteorder.h>
#include <asm/io.h>

#include "defxx.h"

#define DRV_NAME "defxx"
#define DRV_VERSION "v1.10"
#define DRV_RELDATE "2006/12/14"

static char version[] __devinitdata =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	" Lawrence V. Stefani and others\n";

#define DYNAMIC_BUFFERS 1

#define SKBUFF_RX_COPYBREAK 200

#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

#ifdef CONFIG_PCI
#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
#else
#define DFX_BUS_PCI(dev) 0
#endif

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif

static void dfx_bus_init(struct net_device *dev);
static void dfx_bus_uninit(struct net_device *dev);
static void dfx_bus_config_check(DFX_board_t *bp);

static int dfx_driver_init(struct net_device *dev,
			   const char *print_name,
			   resource_size_t bar_start);
static int dfx_adap_init(DFX_board_t *bp, int get_buffers);

static int dfx_open(struct net_device *dev);
static int dfx_close(struct net_device *dev);

static void dfx_int_pr_halt_id(DFX_board_t *bp);
static void dfx_int_type_0_process(DFX_board_t *bp);
static void dfx_int_common(struct net_device *dev);
static irqreturn_t dfx_interrupt(int irq, void *dev_id);

static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void dfx_ctl_set_multicast_list(struct net_device *dev);
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int dfx_ctl_update_cam(DFX_board_t *bp);
static int dfx_ctl_update_filters(DFX_board_t *bp);

static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command,
				PI_UINT32 data_a, PI_UINT32 data_b,
				PI_UINT32 *host_data);
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int dfx_hw_adap_state_rd(DFX_board_t *bp);
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);

static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
static void dfx_rcv_flush(DFX_board_t *bp);

static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev);
static int dfx_xmt_done(DFX_board_t *bp);
static void dfx_xmt_flush(DFX_board_t *bp);

static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;
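
/*
 * CSR access helpers.  dfx_port_write_long() and dfx_port_read_long()
 * write/read a 32-bit adapter register, dispatching to MMIO (writel/readl)
 * or port I/O (outl/inl) depending on how the board is mapped.
 */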
static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}

static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}

static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_writel(bp, offset, data);
	else
		dfx_outl(bp, offset, data);
}

static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}

static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}

static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_readl(bp, offset, data);
	else
		dfx_inl(bp, offset, data);
}
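
/*
 * dfx_get_bars - return the CSR base address and length for the bus the
 * board lives on: PCI BAR 0 or 1, the EISA slot's decode registers, or the
 * TURBOchannel slot resource, honouring the MMIO vs. port I/O selection.
 */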
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;

		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar;

		if (dfx_use_mmio) {
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
			bar <<= 16;
			*bar_start = bar;
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
			bar <<= 16;
			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
		} else {
			*bar_start = base_addr;
			*bar_len = PI_ESIC_K_CSR_IO_LEN;
		}
	}
	if (dfx_bus_tc) {
		*bar_start = to_tc_dev(bdev)->resource.start +
			     PI_TC_K_CSR_OFFSET;
		*bar_len = PI_TC_K_CSR_LEN;
	}
}

static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_multicast_list	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};
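
/*
 * dfx_register - set up a newly found adapter: allocate the FDDI network
 * device, reserve and map its CSR space, run board/driver initialization
 * and register the net device, unwinding everything on failure.
 */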
518static int __devinit dfx_register(struct device *bdev)
519{
520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
522 int dfx_bus_tc = DFX_BUS_TC(bdev);
523 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
524 const char *print_name = dev_name(bdev);
525 struct net_device *dev;
526 DFX_board_t *bp;
527 resource_size_t bar_start = 0;
528 resource_size_t bar_len = 0;
529 int alloc_size;
530 struct resource *region;
531 int err = 0;
532
533 if (!version_disp) {
534 version_disp = 1;
535 printk(version);
536 }
537
538 dev = alloc_fddidev(sizeof(*bp));
539 if (!dev) {
540 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
541 print_name);
542 return -ENOMEM;
543 }
544
545
	/* Enable PCI device. */
	if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
		printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
		       print_name);
		err = -ENODEV;	/* do not report success when enabling failed */
		goto err_out;
	}
551
552 SET_NETDEV_DEV(dev, bdev);
553
554 bp = netdev_priv(dev);
555 bp->bus_dev = bdev;
556 dev_set_drvdata(bdev, dev);
557
558 dfx_get_bars(bdev, &bar_start, &bar_len);
559
560 if (dfx_use_mmio)
561 region = request_mem_region(bar_start, bar_len, print_name);
562 else
563 region = request_region(bar_start, bar_len, print_name);
564 if (!region) {
565 printk(KERN_ERR "%s: Cannot reserve I/O resource "
566 "0x%lx @ 0x%lx, aborting\n",
567 print_name, (long)bar_len, (long)bar_start);
568 err = -EBUSY;
569 goto err_out_disable;
570 }
571
572
573 if (dfx_use_mmio) {
574 bp->base.mem = ioremap_nocache(bar_start, bar_len);
575 if (!bp->base.mem) {
576 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
577 err = -ENOMEM;
578 goto err_out_region;
579 }
580 } else {
581 bp->base.port = bar_start;
582 dev->base_addr = bar_start;
583 }
584
585
586 dev->netdev_ops = &dfx_netdev_ops;
587
588 if (dfx_bus_pci)
589 pci_set_master(to_pci_dev(bdev));
590
591 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
592 err = -ENODEV;
593 goto err_out_unmap;
594 }
595
596 err = register_netdev(dev);
597 if (err)
598 goto err_out_kfree;
599
600 printk("%s: registered as %s\n", print_name, dev->name);
601 return 0;
602
603err_out_kfree:
604 alloc_size = sizeof(PI_DESCR_BLOCK) +
605 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
606#ifndef DYNAMIC_BUFFERS
607 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
608#endif
609 sizeof(PI_CONSUMER_BLOCK) +
610 (PI_ALIGN_K_DESC_BLK - 1);
611 if (bp->kmalloced)
612 dma_free_coherent(bdev, alloc_size,
613 bp->kmalloced, bp->kmalloced_dma);
614
615err_out_unmap:
616 if (dfx_use_mmio)
617 iounmap(bp->base.mem);
618
619err_out_region:
620 if (dfx_use_mmio)
621 release_mem_region(bar_start, bar_len);
622 else
623 release_region(bar_start, bar_len);
624
625err_out_disable:
626 if (dfx_bus_pci)
627 pci_disable_device(to_pci_dev(bdev));
628
629err_out:
630 free_netdev(dev);
631 return err;
632}
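
/*
 * dfx_bus_init - bus-specific initialization.  Determines the IRQ; for EISA
 * it also programs the ESIC I/O decode range, function and slot control and
 * interrupt enable, and for PCI it bumps a too-small latency timer and
 * enables PDQ interrupts and DMA in the PFI mode register.
 */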
666static void __devinit dfx_bus_init(struct net_device *dev)
667{
668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev;
670 int dfx_bus_pci = DFX_BUS_PCI(bdev);
671 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
672 int dfx_bus_tc = DFX_BUS_TC(bdev);
673 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
674 u8 val;
675
676 DBG_printk("In dfx_bus_init...\n");
677
678
679 bp->dev = dev;
680
681
682
683 if (dfx_bus_tc)
684 dev->irq = to_tc_dev(bdev)->interrupt;
685 if (dfx_bus_eisa) {
686 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
687
688
689 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
690 val &= PI_CONFIG_STAT_0_M_IRQ;
691 val >>= PI_CONFIG_STAT_0_V_IRQ;
692
693 switch (val) {
694 case PI_CONFIG_STAT_0_IRQ_K_9:
695 dev->irq = 9;
696 break;
697
698 case PI_CONFIG_STAT_0_IRQ_K_10:
699 dev->irq = 10;
700 break;
701
702 case PI_CONFIG_STAT_0_IRQ_K_11:
703 dev->irq = 11;
704 break;
705
706 case PI_CONFIG_STAT_0_IRQ_K_15:
707 dev->irq = 15;
708 break;
709 }
710
711
712
713
714
715
716
717
718
719
720
721
722
723 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
724 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val);
725 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0);
726 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val);
727 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0);
728 val = PI_ESIC_K_CSR_IO_LEN - 1;
729 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff);
730 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff);
731 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff);
732 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff);
733
734
735 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
736 if (dfx_use_mmio)
737 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
738 outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val);
739
740
741
742
743
744 val = PI_SLOT_CNTRL_M_ENB;
745 outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val);
746
747
748
749
750
751 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
752 if (dfx_use_mmio)
753 val |= PI_BURST_HOLDOFF_V_MEM_MAP;
754 else
755 val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
756 outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val);
757
758
759 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
760 val |= PI_CONFIG_STAT_0_M_INT_ENB;
761 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
762 }
763 if (dfx_bus_pci) {
764 struct pci_dev *pdev = to_pci_dev(bdev);
765
766
767
768 dev->irq = pdev->irq;
769
770
771
772 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
773 if (val < PFI_K_LAT_TIMER_MIN) {
774 val = PFI_K_LAT_TIMER_DEF;
775 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
776 }
777
778
779 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
780 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
781 }
782}
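
/*
 * dfx_bus_uninit - bus-specific shutdown: mask board interrupts at the
 * ESIC (EISA) or the PFI (PCI) before the device goes away.
 */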
static void __devexit dfx_bus_uninit(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	u8 val;

	DBG_printk("In dfx_bus_uninit...\n");

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
	}
	if (dfx_bus_pci) {
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
	}
}
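
/*
 * dfx_bus_config_check - work around a DEFEA limitation: for boards with
 * EISA product ID 2 whose PDQ revision is old (or cannot be read), cap the
 * DMA burst size at 8 longwords and force full duplex off.
 */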
869static void __devinit dfx_bus_config_check(DFX_board_t *bp)
870{
871 struct device __maybe_unused *bdev = bp->bus_dev;
872 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
873 int status;
874 u32 host_data;
875
876 DBG_printk("In dfx_bus_config_check...\n");
877
878
879
880 if (dfx_bus_eisa) {
881
882
883
884
885
886
887
888 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
889
890
891
892
893 status = dfx_hw_port_ctrl_req(bp,
894 PI_PCTRL_M_SUB_CMD,
895 PI_SUB_CMD_K_PDQ_REV_GET,
896 0,
897 &host_data);
898 if ((status != DFX_K_SUCCESS) || (host_data == 2))
899 {
900
901
902
903
904
905
906
907
908 switch (bp->burst_size)
909 {
910 case PI_PDATA_B_DMA_BURST_SIZE_32:
911 case PI_PDATA_B_DMA_BURST_SIZE_16:
912 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
913 break;
914
915 default:
916 break;
917 }
918
919
920
921 bp->full_duplex_enb = PI_SNMP_K_FALSE;
922 }
923 }
924 }
925 }
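
/*
 * dfx_driver_init - one-time per-board driver initialization: set default
 * operating parameters, reset the adapter, read the factory MAC address,
 * and carve the coherent DMA allocation into the descriptor block, command
 * request/response buffers, receive buffer area and consumer block.
 */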
965static int __devinit dfx_driver_init(struct net_device *dev,
966 const char *print_name,
967 resource_size_t bar_start)
968{
969 DFX_board_t *bp = netdev_priv(dev);
970 struct device *bdev = bp->bus_dev;
971 int dfx_bus_pci = DFX_BUS_PCI(bdev);
972 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
973 int dfx_bus_tc = DFX_BUS_TC(bdev);
974 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
975 int alloc_size;
976 char *top_v, *curr_v;
977 dma_addr_t top_p, curr_p;
978 u32 data;
979 __le32 le32;
980 char *board_name = NULL;
981
982 DBG_printk("In dfx_driver_init...\n");
983
984
985
986 dfx_bus_init(dev);
987
988
989
990
991
992
993
994
995
996
997 bp->full_duplex_enb = PI_SNMP_K_FALSE;
998 bp->req_ttrt = 8 * 12500;
999 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
1000 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011 dfx_bus_config_check(bp);
1012
1013
1014
1015 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1016
1017
1018
1019 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1020
1021
1022
1023 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1024 &data) != DFX_K_SUCCESS) {
1025 printk("%s: Could not read adapter factory MAC address!\n",
1026 print_name);
1027 return DFX_K_FAILURE;
1028 }
1029 le32 = cpu_to_le32(data);
1030 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1031
1032 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1033 &data) != DFX_K_SUCCESS) {
1034 printk("%s: Could not read adapter factory MAC address!\n",
1035 print_name);
1036 return DFX_K_FAILURE;
1037 }
1038 le32 = cpu_to_le32(data);
1039 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1040
1041
1042
1043
1044
1045
1046
1047
1048 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1049 if (dfx_bus_tc)
1050 board_name = "DEFTA";
1051 if (dfx_bus_eisa)
1052 board_name = "DEFEA";
1053 if (dfx_bus_pci)
1054 board_name = "DEFPA";
1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1056 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1057 (long long)bar_start, dev->irq, dev->dev_addr);
1058
1059
1060
1061
1062
1063
1064 alloc_size = sizeof(PI_DESCR_BLOCK) +
1065 PI_CMD_REQ_K_SIZE_MAX +
1066 PI_CMD_RSP_K_SIZE_MAX +
1067#ifndef DYNAMIC_BUFFERS
1068 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1069#endif
1070 sizeof(PI_CONSUMER_BLOCK) +
1071 (PI_ALIGN_K_DESC_BLK - 1);
1072 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1073 &bp->kmalloced_dma,
1074 GFP_ATOMIC);
1075 if (top_v == NULL) {
1076 printk("%s: Could not allocate memory for host buffers "
1077 "and structures!\n", print_name);
1078 return DFX_K_FAILURE;
1079 }
1080 memset(top_v, 0, alloc_size);
1081 top_p = bp->kmalloced_dma;
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1096 curr_v = top_v + (curr_p - top_p);
1097
1098
1099
1100 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1101 bp->descr_block_phys = curr_p;
1102 curr_v += sizeof(PI_DESCR_BLOCK);
1103 curr_p += sizeof(PI_DESCR_BLOCK);
1104
1105
1106
1107 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1108 bp->cmd_req_phys = curr_p;
1109 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1110 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1111
1112
1113
1114 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1115 bp->cmd_rsp_phys = curr_p;
1116 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1117 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1118
1119
1120
1121 bp->rcv_block_virt = curr_v;
1122 bp->rcv_block_phys = curr_p;
1123
1124#ifndef DYNAMIC_BUFFERS
1125 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1126 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1127#endif
1128
1129
1130
1131 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1132 bp->cons_block_phys = curr_p;
1133
1134
1135
1136 DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
1137 print_name,
1138 (long)bp->descr_block_virt, bp->descr_block_phys);
1139 DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
1140 print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
1141 DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
1142 print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
1143 DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
1144 print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
1145 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1146 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1147
1148 return DFX_K_SUCCESS;
1149}
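
/*
 * dfx_adap_init - bring the adapter to an operational state: reset the PDQ,
 * clear descriptor/consumer state, set the burst size, consumer and
 * descriptor block addresses, program characteristics (flush timeout, full
 * duplex, requested TTRT), load the CAM and filters, post receive buffers
 * and issue the start command before re-enabling interrupts.
 */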
1185static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1186 {
1187 DBG_printk("In dfx_adap_init...\n");
1188
1189
1190
1191 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1192
1193
1194
1195 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1196 {
1197 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1198 return DFX_K_FAILURE;
1199 }
1200
1201
1202
1203
1204
1205
1206 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1207
1208
1209
1210
1211
1212
1213
1214
1215 bp->cmd_req_reg.lword = 0;
1216 bp->cmd_rsp_reg.lword = 0;
1217 bp->rcv_xmt_reg.lword = 0;
1218
1219
1220
1221 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1222
1223
1224
1225 if (dfx_hw_port_ctrl_req(bp,
1226 PI_PCTRL_M_SUB_CMD,
1227 PI_SUB_CMD_K_BURST_SIZE_SET,
1228 bp->burst_size,
1229 NULL) != DFX_K_SUCCESS)
1230 {
1231 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1232 return DFX_K_FAILURE;
1233 }
1234
1235
1236
1237
1238
1239
1240
1241
1242 if (dfx_hw_port_ctrl_req(bp,
1243 PI_PCTRL_M_CONS_BLOCK,
1244 bp->cons_block_phys,
1245 0,
1246 NULL) != DFX_K_SUCCESS)
1247 {
1248 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1249 return DFX_K_FAILURE;
1250 }
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1263 (u32)(bp->descr_block_phys |
1264 PI_PDATA_A_INIT_M_BSWAP_INIT),
1265 0, NULL) != DFX_K_SUCCESS) {
1266 printk("%s: Could not set descriptor block address!\n",
1267 bp->dev->name);
1268 return DFX_K_FAILURE;
1269 }
1270
1271
1272
1273 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1274 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1275 bp->cmd_req_virt->char_set.item[0].value = 3;
1276 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1277 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1278 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1279 {
1280 printk("%s: DMA command request failed!\n", bp->dev->name);
1281 return DFX_K_FAILURE;
1282 }
1283
1284
1285
1286 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1287 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1288 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1289 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1290 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1291 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1292 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1293 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1294 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1295 {
1296 printk("%s: DMA command request failed!\n", bp->dev->name);
1297 return DFX_K_FAILURE;
1298 }
1299
1300
1301
1302 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1303 {
1304 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1305 return DFX_K_FAILURE;
1306 }
1307
1308
1309
1310 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1311 {
1312 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1313 return DFX_K_FAILURE;
1314 }
1315
1316
1317
1318
1319
1320
1321 if (get_buffers)
1322 dfx_rcv_flush(bp);
1323
1324
1325
1326 if (dfx_rcv_init(bp, get_buffers))
1327 {
1328 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1329 if (get_buffers)
1330 dfx_rcv_flush(bp);
1331 return DFX_K_FAILURE;
1332 }
1333
1334
1335
1336 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1337 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1338 {
1339 printk("%s: Start command failed\n", bp->dev->name);
1340 if (get_buffers)
1341 dfx_rcv_flush(bp);
1342 return DFX_K_FAILURE;
1343 }
1344
1345
1346
1347 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1348 return DFX_K_SUCCESS;
1349 }
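
/*
 * dfx_open - the ndo_open handler: request the (possibly shared) IRQ, reset
 * the address filter state to the factory MAC address, initialize the
 * adapter and start the transmit queue.
 */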
1382static int dfx_open(struct net_device *dev)
1383{
1384 DFX_board_t *bp = netdev_priv(dev);
1385 int ret;
1386
1387 DBG_printk("In dfx_open...\n");
1388
1389
1390
1391 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1392 dev);
1393 if (ret) {
1394 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1395 return ret;
1396 }
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1410
1411
1412
1413 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1414 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1415 bp->uc_count = 0;
1416 bp->mc_count = 0;
1417
1418
1419
1420 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1421 bp->group_prom = PI_FSTATE_K_BLOCK;
1422
1423 spin_lock_init(&bp->lock);
1424
1425
1426
1427 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;
1428 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1429 {
1430 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1431 free_irq(dev->irq, dev);
1432 return -EAGAIN;
1433 }
1434
1435
1436 netif_start_queue(dev);
1437 return 0;
1438}
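
/*
 * dfx_close - the ndo_stop handler: disable interrupts, reset the adapter,
 * flush pending transmit and receive buffers, clear descriptor and consumer
 * state, stop the queue and release the IRQ.
 */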
1473static int dfx_close(struct net_device *dev)
1474{
1475 DFX_board_t *bp = netdev_priv(dev);
1476
1477 DBG_printk("In dfx_close...\n");
1478
1479
1480
1481 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1482
1483
1484
1485 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496 dfx_xmt_flush(bp);
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509 bp->cmd_req_reg.lword = 0;
1510 bp->cmd_rsp_reg.lword = 0;
1511 bp->rcv_xmt_reg.lword = 0;
1512
1513
1514
1515 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1516
1517
1518
1519 dfx_rcv_flush(bp);
1520
1521
1522
1523 netif_stop_queue(dev);
1524
1525
1526
1527 free_irq(dev->irq, dev);
1528
1529 return 0;
1530}
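
/*
 * dfx_int_pr_halt_id - decode the halt id from the port status register and
 * print the reason the adapter halted.
 */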
1560static void dfx_int_pr_halt_id(DFX_board_t *bp)
1561 {
1562 PI_UINT32 port_status;
1563 PI_UINT32 halt_id;
1564
1565
1566
1567 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1568
1569
1570
1571 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1572 switch (halt_id)
1573 {
1574 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1575 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1576 break;
1577
1578 case PI_HALT_ID_K_PARITY_ERROR:
1579 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1580 break;
1581
1582 case PI_HALT_ID_K_HOST_DIR_HALT:
1583 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1584 break;
1585
1586 case PI_HALT_ID_K_SW_FAULT:
1587 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1588 break;
1589
1590 case PI_HALT_ID_K_HW_FAULT:
1591 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1592 break;
1593
1594 case PI_HALT_ID_K_PC_TRACE:
1595 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1596 break;
1597
1598 case PI_HALT_ID_K_DMA_ERROR:
1599 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1600 break;
1601
1602 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1603 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1604 break;
1605
1606 case PI_HALT_ID_K_BUS_EXCEPTION:
1607 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1608 break;
1609
1610 default:
1611 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1612 break;
1613 }
1614 }
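
/*
 * dfx_int_type_0_process - handle type 0 interrupts: report fatal bus and
 * packet-memory errors and reset the adapter, acknowledge transmit flush
 * completion, and track state changes (halted -> reset, link available).
 */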
1664static void dfx_int_type_0_process(DFX_board_t *bp)
1665
1666 {
1667 PI_UINT32 type_0_status;
1668 PI_UINT32 state;
1669
1670
1671
1672
1673
1674
1675
1676 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1677 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1678
1679
1680
1681 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1682 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1683 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1684 {
1685
1686
1687 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1688 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1689
1690
1691
1692 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1693 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1694
1695
1696
1697 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1698 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1699
1700
1701
1702 bp->link_available = PI_K_FALSE;
1703 bp->reset_type = 0;
1704 printk("%s: Resetting adapter...\n", bp->dev->name);
1705 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1706 {
1707 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1708 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1709 return;
1710 }
1711 printk("%s: Adapter reset successful!\n", bp->dev->name);
1712 return;
1713 }
1714
1715
1716
1717 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1718 {
1719
1720
1721 bp->link_available = PI_K_FALSE;
1722 dfx_xmt_flush(bp);
1723 (void) dfx_hw_port_ctrl_req(bp,
1724 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1725 0,
1726 0,
1727 NULL);
1728 }
1729
1730
1731
1732 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1733 {
1734
1735
1736 state = dfx_hw_adap_state_rd(bp);
1737 if (state == PI_STATE_K_HALTED)
1738 {
1739
1740
1741
1742
1743
1744
1745 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1746 dfx_int_pr_halt_id(bp);
1747
1748
1749
1750 bp->link_available = PI_K_FALSE;
1751 bp->reset_type = 0;
1752 printk("%s: Resetting adapter...\n", bp->dev->name);
1753 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1754 {
1755 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1756 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1757 return;
1758 }
1759 printk("%s: Adapter reset successful!\n", bp->dev->name);
1760 }
1761 else if (state == PI_STATE_K_LINK_AVAIL)
1762 {
1763 bp->link_available = PI_K_TRUE;
1764 }
1765 }
1766 }
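
/*
 * dfx_int_common - interrupt service common to all buses: reap completed
 * transmits, process received frames, update the type 2 producer register
 * and handle any pending type 0 interrupt.
 */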
static void dfx_int_common(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	PI_UINT32 port_status;

	if (dfx_xmt_done(bp))
		netif_wake_queue(dev);

	dfx_rcv_queue_process(bp);

	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
		dfx_int_type_0_process(bp);
}
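
/*
 * dfx_interrupt - interrupt handler: check whether our board is actually
 * interrupting (the IRQ may be shared) and, in a bus-specific way, mask and
 * acknowledge the interrupt around a call to dfx_int_common().
 */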
1879static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1880{
1881 struct net_device *dev = dev_id;
1882 DFX_board_t *bp = netdev_priv(dev);
1883 struct device *bdev = bp->bus_dev;
1884 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1885 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1886 int dfx_bus_tc = DFX_BUS_TC(bdev);
1887
1888
1889
1890 if (dfx_bus_pci) {
1891 u32 status;
1892
1893 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1894 if (!(status & PFI_STATUS_M_PDQ_INT))
1895 return IRQ_NONE;
1896
1897 spin_lock(&bp->lock);
1898
1899
1900 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1901 PFI_MODE_M_DMA_ENB);
1902
1903
1904 dfx_int_common(dev);
1905
1906
1907 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1908 PFI_STATUS_M_PDQ_INT);
1909 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1910 (PFI_MODE_M_PDQ_INT_ENB |
1911 PFI_MODE_M_DMA_ENB));
1912
1913 spin_unlock(&bp->lock);
1914 }
1915 if (dfx_bus_eisa) {
1916 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1917 u8 status;
1918
1919 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1920 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1921 return IRQ_NONE;
1922
1923 spin_lock(&bp->lock);
1924
1925
1926 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1927 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1928
1929
1930 dfx_int_common(dev);
1931
1932
1933 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1934 status |= PI_CONFIG_STAT_0_M_INT_ENB;
1935 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1936
1937 spin_unlock(&bp->lock);
1938 }
1939 if (dfx_bus_tc) {
1940 u32 status;
1941
1942 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1943 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1944 PI_PSTATUS_M_XMT_DATA_PENDING |
1945 PI_PSTATUS_M_SMT_HOST_PENDING |
1946 PI_PSTATUS_M_UNSOL_PENDING |
1947 PI_PSTATUS_M_CMD_RSP_PENDING |
1948 PI_PSTATUS_M_CMD_REQ_PENDING |
1949 PI_PSTATUS_M_TYPE_0_PENDING)))
1950 return IRQ_NONE;
1951
1952 spin_lock(&bp->lock);
1953
1954
1955 dfx_int_common(dev);
1956
1957 spin_unlock(&bp->lock);
1958 }
1959
1960 return IRQ_HANDLED;
1961}
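
/*
 * dfx_ctl_get_stats - fill in the generic network statistics from the
 * driver's counters and, via SMT MIB and counters get commands, the
 * FDDI-specific fields, then return the board's statistics structure.
 */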
2007static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2008 {
2009 DFX_board_t *bp = netdev_priv(dev);
2010
2011
2012
2013 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2014 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2015 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2016 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2017 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2018 bp->rcv_frame_status_errors +
2019 bp->rcv_length_errors;
2020 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2021 bp->stats.gen.rx_dropped = bp->rcv_discards;
2022 bp->stats.gen.tx_dropped = bp->xmt_discards;
2023 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2024 bp->stats.gen.collisions = 0;
2025
2026
2027
2028 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2029 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2030 return (struct net_device_stats *)&bp->stats;
2031
2032
2033
2034 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2035 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2036 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2037 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2038 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2039 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2040 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2041 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2042 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2043 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2044 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2045 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2046 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2047 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2048 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2049 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2050 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2051 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2052 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2053 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2054 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2055 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2056 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2057 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2058 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2059 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2060 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2061 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2062 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2063 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2064 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2065 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2066 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2067 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2068 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2069 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2070 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2071 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2072 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2073 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2074 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2075 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2076 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2077 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2078 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2079 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2080 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2081 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2082 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2083 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2084 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2085 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2086 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2087 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2088 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2089 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2090 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2091 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2092 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2093 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2094 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2095 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2096 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2097 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2098 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2099 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2100 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2101 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2102 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2103 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2104 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2105 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2106 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2107 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2108 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2109 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2110 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2111 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2112 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2113 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2114 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2115 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2116 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2117 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2118 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2119 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2120 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2121 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2122 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2123 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2124 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2125 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2126
2127
2128
2129 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2130 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2131 return (struct net_device_stats *)&bp->stats;
2132
2133
2134
2135 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2136 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2137 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2138 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2139 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2140 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2141 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2142 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2143 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2144 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2145 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2146
2147 return (struct net_device_stats *)&bp->stats;
2148 }
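
/*
 * dfx_ctl_set_multicast_list - update the promiscuous and group-promiscuous
 * filter state from dev->flags and reload the multicast CAM entries,
 * falling back to group-promiscuous mode when the list does not fit.
 */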
2194static void dfx_ctl_set_multicast_list(struct net_device *dev)
2195{
2196 DFX_board_t *bp = netdev_priv(dev);
2197 int i;
2198 struct netdev_hw_addr *ha;
2199
2200
2201
2202 if (dev->flags & IFF_PROMISC)
2203 bp->ind_group_prom = PI_FSTATE_K_PASS;
2204
2205
2206
2207 else
2208 {
2209 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2231 {
2232 bp->group_prom = PI_FSTATE_K_PASS;
2233 bp->mc_count = 0;
2234 }
2235 else
2236 {
2237 bp->group_prom = PI_FSTATE_K_BLOCK;
2238 bp->mc_count = netdev_mc_count(dev);
2239 }
2240
2241
2242
2243 i = 0;
2244 netdev_for_each_mc_addr(ha, dev)
2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2246 ha->addr, FDDI_K_ALEN);
2247
2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2249 {
2250 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2251 }
2252 else
2253 {
2254 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2255 }
2256 }
2257
2258
2259
2260 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2261 {
2262 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2263 }
2264 else
2265 {
2266 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2267 }
2268 }
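
/*
 * dfx_ctl_set_mac_address - change the station (CAM) address; if the
 * unicast and multicast entries no longer fit in the address filter, fall
 * back to group-promiscuous mode.
 */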
2307static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2308 {
2309 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2310 DFX_board_t *bp = netdev_priv(dev);
2311
2312
2313
2314 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
2315 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);
2316 bp->uc_count = 1;
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2331 {
2332 bp->group_prom = PI_FSTATE_K_PASS;
2333 bp->mc_count = 0;
2334
2335
2336
2337 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2338 {
2339 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2340 }
2341 else
2342 {
2343 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2344 }
2345 }
2346
2347
2348
2349 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2350 {
2351 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2352 }
2353 else
2354 {
2355 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2356 }
2357 return 0;
2358 }
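
/*
 * dfx_ctl_update_cam - rebuild the adapter's address filter (CAM) from the
 * driver's unicast and multicast tables with an ADDR_FILTER_SET command.
 */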
2394static int dfx_ctl_update_cam(DFX_board_t *bp)
2395 {
2396 int i;
2397 PI_LAN_ADDR *p_addr;
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);
2413 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2414 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2415
2416
2417
2418 for (i=0; i < (int)bp->uc_count; i++)
2419 {
2420 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2421 {
2422 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2423 p_addr++;
2424 }
2425 }
2426
2427
2428
2429 for (i=0; i < (int)bp->mc_count; i++)
2430 {
2431 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2432 {
2433 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2434 p_addr++;
2435 }
2436 }
2437
2438
2439
2440 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2441 return DFX_K_FAILURE;
2442 return DFX_K_SUCCESS;
2443 }
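
/*
 * dfx_ctl_update_filters - push the broadcast, individual/group-promiscuous
 * and group-promiscuous filter states to the adapter with a FILTERS_SET
 * command.
 */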
static int dfx_ctl_update_filters(DFX_board_t *bp)
	{
	int i = 0;

	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;

	bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
	bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;

	bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
	bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;

	bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
	bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;

	bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;

	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return DFX_K_FAILURE;
	return DFX_K_SUCCESS;
	}
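
/*
 * dfx_hw_dma_cmd_req - issue the command in the command request buffer over
 * the DMA command queues, provided the adapter is in a state that can
 * accept it, and busy-wait on the consumer block for both the request and
 * the response to complete.
 */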
2552static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2553 {
2554 int status;
2555 int timeout_cnt;
2556
2557
2558
2559 status = dfx_hw_adap_state_rd(bp);
2560 if ((status == PI_STATE_K_RESET) ||
2561 (status == PI_STATE_K_HALTED) ||
2562 (status == PI_STATE_K_DMA_UNAVAIL) ||
2563 (status == PI_STATE_K_UPGRADE))
2564 return DFX_K_OUTSTATE;
2565
2566
2567
2568 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2569 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2570 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2571
2572
2573
2574 bp->cmd_rsp_reg.index.prod += 1;
2575 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2576 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2577
2578
2579
2580 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2581 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2582 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2583
2584
2585
2586 bp->cmd_req_reg.index.prod += 1;
2587 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2588 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2589
2590
2591
2592
2593
2594
2595 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2596 {
2597 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2598 break;
2599 udelay(100);
2600 }
2601 if (timeout_cnt == 0)
2602 return DFX_K_HW_TIMEOUT;
2603
2604
2605
2606 bp->cmd_req_reg.index.comp += 1;
2607 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2608 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2609
2610
2611
2612
2613
2614
2615 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2616 {
2617 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2618 break;
2619 udelay(100);
2620 }
2621 if (timeout_cnt == 0)
2622 return DFX_K_HW_TIMEOUT;
2623
2624
2625
2626 bp->cmd_rsp_reg.index.comp += 1;
2627 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2628 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2629 return DFX_K_SUCCESS;
2630 }
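
/*
 * dfx_hw_port_ctrl_req - issue a PDQ port control command through the port
 * data/control registers and poll for completion, optionally returning the
 * host data register contents.
 */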
2666static int dfx_hw_port_ctrl_req(
2667 DFX_board_t *bp,
2668 PI_UINT32 command,
2669 PI_UINT32 data_a,
2670 PI_UINT32 data_b,
2671 PI_UINT32 *host_data
2672 )
2673
2674 {
2675 PI_UINT32 port_cmd;
2676 int timeout_cnt;
2677
2678
2679
2680 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2681
2682
2683
2684 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2685 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2686 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2687
2688
2689
2690 if (command == PI_PCTRL_M_BLAST_FLASH)
2691 timeout_cnt = 600000;
2692 else
2693 timeout_cnt = 20000;
2694
2695 for (; timeout_cnt > 0; timeout_cnt--)
2696 {
2697 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2698 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2699 break;
2700 udelay(100);
2701 }
2702 if (timeout_cnt == 0)
2703 return DFX_K_HW_TIMEOUT;
2704
2705
2706
2707
2708
2709
2710
2711 if (host_data != NULL)
2712 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2713 return DFX_K_SUCCESS;
2714 }
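
/*
 * dfx_hw_adap_reset - pulse the PDQ reset line with the requested reset
 * type placed in the port data A register.
 */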
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type)
	{
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);

	udelay(20);

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
	}
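
/*
 * dfx_hw_adap_state_rd - return the adapter state field from the port
 * status register.
 */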
static int dfx_hw_adap_state_rd(DFX_board_t *bp)
	{
	PI_UINT32 port_status;

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
	}
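
/*
 * dfx_hw_dma_uninit - reset the adapter and wait for it to reach the
 * DMA_UNAVAILABLE state.
 */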
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
	{
	int timeout_cnt;

	dfx_hw_adap_reset(bp, type);

	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
		{
		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return DFX_K_HW_TIMEOUT;
	return DFX_K_SUCCESS;
	}
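
/*
 * my_skb_align - align the data pointer of a freshly allocated skb to an
 * n-byte boundary (n must be a power of two) by reserving the pad bytes.
 */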
static void my_skb_align(struct sk_buff *skb, int n)
{
	unsigned long x = (unsigned long)skb->data;
	unsigned long v;

	v = ALIGN(x, n);

	skb_reserve(skb, v - x);
}
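
/*
 * dfx_rcv_init - fill the receive descriptor block either with freshly
 * allocated, 128-byte aligned socket buffers (DYNAMIC_BUFFERS) or with
 * slices of the preallocated receive buffer block, then advance the
 * receive producer index.
 */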
2910static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2911 {
2912 int i, j;
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932 if (get_buffers) {
2933#ifdef DYNAMIC_BUFFERS
2934 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2935 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2936 {
2937 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2938 if (!newskb)
2939 return -ENOMEM;
2940 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2941 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2942
2943
2944
2945
2946
2947 my_skb_align(newskb, 128);
2948 bp->descr_block_virt->rcv_data[i + j].long_1 =
2949 (u32)dma_map_single(bp->bus_dev, newskb->data,
2950 NEW_SKB_SIZE,
2951 DMA_FROM_DEVICE);
2952
2953
2954
2955
2956 bp->p_rcv_buff_va[i+j] = (char *) newskb;
2957 }
2958#else
2959 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2960 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2961 {
2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2965 bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2966 }
2967#endif
2968 }
2969
2970
2971
2972 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2973 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2974 return 0;
2975 }
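
/*
 * dfx_rcv_queue_process - walk the receive completions: count frames the
 * RCC flushed, copy small frames into new skbs (or, with DYNAMIC_BUFFERS,
 * swap in a fresh buffer for large ones), hand good frames to netif_rx()
 * and recycle the descriptors.
 */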
static void dfx_rcv_queue_process(DFX_board_t *bp)
    {
    PI_TYPE_2_CONSUMER *p_type_2_cons;
    char *p_buff;
    u32 descr, pkt_len;
    struct sk_buff *skb;

    /* Service all receive descriptors the adapter has completed */

    p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
    while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
        {
        int entry;

        entry = bp->rcv_xmt_reg.index.rcv_comp;
#ifdef DYNAMIC_BUFFERS
        p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
        p_buff = (char *) bp->p_rcv_buff_va[entry];
#endif
        memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));

        if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
            {
            if (descr & PI_FMC_DESCR_M_RCC_CRC)
                bp->rcv_crc_errors++;
            else
                bp->rcv_frame_status_errors++;
            }
        else
            {
            int rx_in_place = 0;

            /* Extract the frame length and drop out-of-range frames */

            pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
            pkt_len -= 4;               /* subtract 4-byte CRC */
            if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
                bp->rcv_length_errors++;
            else {
#ifdef DYNAMIC_BUFFERS
                if (pkt_len > SKBUFF_RX_COPYBREAK) {
                    struct sk_buff *newskb;

                    /*
                     * Long frame: pass the posted buffer up in place
                     * and replace it with a freshly allocated one.
                     */
                    newskb = dev_alloc_skb(NEW_SKB_SIZE);
                    if (newskb) {
                        rx_in_place = 1;

                        my_skb_align(newskb, 128);
                        skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
                        dma_unmap_single(bp->bus_dev,
                                         bp->descr_block_virt->rcv_data[entry].long_1,
                                         NEW_SKB_SIZE,
                                         DMA_FROM_DEVICE);
                        skb_reserve(skb, RCV_BUFF_K_PADDING);
                        bp->p_rcv_buff_va[entry] = (char *)newskb;
                        bp->descr_block_virt->rcv_data[entry].long_1 =
                            (u32)dma_map_single(bp->bus_dev,
                                                newskb->data,
                                                NEW_SKB_SIZE,
                                                DMA_FROM_DEVICE);
                    } else
                        skb = NULL;
                } else
#endif
                    skb = dev_alloc_skb(pkt_len + 3);   /* room for the 3-byte header */
                if (skb == NULL)
                    {
                    printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
                    bp->rcv_discards++;
                    break;
                    }
                else {
                    /* Copy the frame only when it was not passed up in place */
                    if (!rx_in_place)
                    {
                        skb_copy_to_linear_data(skb,
                                                p_buff + RCV_BUFF_K_PADDING,
                                                pkt_len + 3);
                    }

                    skb_reserve(skb, 3);    /* adjust data so it points to the FC byte */
                    skb_put(skb, pkt_len);
                    skb->protocol = fddi_type_trans(skb, bp->dev);
                    bp->rcv_total_bytes += skb->len;
                    netif_rx(skb);

                    /* Update receive statistics */
                    bp->rcv_total_frames++;
                    if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
                        bp->rcv_multicast_frames++;
                }
            }
        }

        /* Repost the buffer by advancing the producer and completion indices */

        bp->rcv_xmt_reg.index.rcv_prod += 1;
        bp->rcv_xmt_reg.index.rcv_comp += 1;
        }
    }
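
/*
 * dfx_xmt_queue_pkt - queue an outgoing LLC frame on the transmit ring
 *
 * Validates the frame length, discards frames while the adapter reports
 * the link unavailable, prepends the 3-byte packet request header the
 * adapter expects, maps the data for DMA, fills in the next transmit
 * descriptor and advances the transmit producer index.  Returns
 * NETDEV_TX_BUSY if the descriptor ring is full so the stack will retry
 * the frame later.
 */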
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                                     struct net_device *dev)
    {
    DFX_board_t *bp = netdev_priv(dev);
    u8 prod;
    PI_XMT_DESCR *p_xmt_descr;
    XMT_DRIVER_DESCR *p_xmt_drv_descr;
    unsigned long flags;

    netif_stop_queue(dev);

    /*
     * Drop frames whose LLC length is outside the legal FDDI range;
     * they are counted as transmit length errors.
     */

    if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
        {
        printk("%s: Invalid packet length - %u bytes\n",
               dev->name, skb->len);
        bp->xmt_length_errors++;
        netif_wake_queue(dev);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;    /* drop frame but report success so it is not retried */
        }

    /*
     * Discard frames while the link is down.  The cached link state is
     * refreshed from the adapter so transmission resumes as soon as the
     * ring becomes operational again.
     */

    if (bp->link_available == PI_K_FALSE)
        {
        if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)
            bp->link_available = PI_K_TRUE;     /* set link available flag */
        else
            {
            bp->xmt_discards++;
            dev_kfree_skb(skb);
            netif_wake_queue(dev);
            return NETDEV_TX_OK;
            }
        }

    spin_lock_irqsave(&bp->lock, flags);

    /* Get the current producer index and its transmit descriptor */

    prod = bp->rcv_xmt_reg.index.xmt_prod;
    p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);

    /* Get the matching driver descriptor and advance the local producer */

    p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);

    /* Prepend the 3-byte packet request header expected by the adapter */

    skb_push(skb, 3);
    skb->data[0] = DFX_PRH0_BYTE;
    skb->data[1] = DFX_PRH1_BYTE;
    skb->data[2] = DFX_PRH2_BYTE;

    /*
     * Build the transmit descriptor: a single segment holding the whole
     * frame (SOP and EOP set), mapped for DMA to the device.
     */

    p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
    p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
                                              skb->len, DMA_TO_DEVICE);

    /*
     * If advancing the producer would catch up with the completion
     * index, the ring is full; undo the header and ask the stack to
     * requeue the frame.
     */

    if (prod == bp->rcv_xmt_reg.index.xmt_comp)
        {
        skb_pull(skb, 3);
        spin_unlock_irqrestore(&bp->lock, flags);
        return NETDEV_TX_BUSY;  /* requeue packet for later */
        }

    /* Remember the skb so dfx_xmt_done() can unmap and free it */

    p_xmt_drv_descr->p_skb = skb;

    /* Publish the new producer index to the adapter */

    bp->rcv_xmt_reg.index.xmt_prod = prod;
    dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
    spin_unlock_irqrestore(&bp->lock, flags);
    netif_wake_queue(dev);
    return NETDEV_TX_OK;
    }
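
/*
 * dfx_xmt_done - reclaim transmit descriptors completed by the adapter
 *
 * Called from the interrupt path.  For each descriptor the adapter has
 * consumed, updates the transmit byte and frame counters, unmaps the
 * DMA buffer, frees the skb and advances the completion index.  Returns
 * the number of descriptors freed.
 */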
static int dfx_xmt_done(DFX_board_t *bp)
    {
    XMT_DRIVER_DESCR *p_xmt_drv_descr;
    PI_TYPE_2_CONSUMER *p_type_2_cons;
    u8 comp;
    int freed = 0;

    /* Service all transmit descriptors the adapter has consumed */

    p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
    while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
        {
        /* Get the driver descriptor for this completed frame */

        p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

        /* Update transmit statistics */

        bp->xmt_total_frames++;
        bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;

        /* Unmap the DMA buffer and release the skb */
        comp = bp->rcv_xmt_reg.index.xmt_comp;
        dma_unmap_single(bp->bus_dev,
                         bp->descr_block_virt->xmt_data[comp].long_1,
                         p_xmt_drv_descr->p_skb->len,
                         DMA_TO_DEVICE);
        dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);

        /* Move on to the next completed descriptor */

        bp->rcv_xmt_reg.index.xmt_comp += 1;
        freed++;
        }
    return freed;
    }
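
/*
 * dfx_rcv_flush - release any posted receive buffers
 *
 * Used when the adapter is being shut down or reinitialized.  With
 * DYNAMIC_BUFFERS the driver owns one skb per posted descriptor, so
 * each one is unmapped, freed and its slot cleared; without dynamic
 * buffers the receive area is part of the single coherent allocation
 * and nothing needs doing.
 */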
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp)
    {
    int i, j;

    for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
        for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
        {
            struct sk_buff *skb;

            skb = (struct sk_buff *)bp->p_rcv_buff_va[i + j];
            if (skb) {
                /* Undo the streaming mapping set up in dfx_rcv_init()
                 * before freeing the buffer (assumes the NEW_SKB_SIZE
                 * mapping length used throughout this driver). */
                dma_unmap_single(bp->bus_dev,
                                 bp->descr_block_virt->rcv_data[i + j].long_1,
                                 NEW_SKB_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
            }
            bp->p_rcv_buff_va[i + j] = NULL;
        }
    }
#else
static inline void dfx_rcv_flush(DFX_board_t *bp)
{
}
#endif
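
/*
 * dfx_xmt_flush - discard all transmit frames still queued to the adapter
 *
 * Used during adapter shutdown or reset.  Every descriptor between the
 * completion and producer indices is unmapped and its skb freed, with
 * the frames counted as transmit discards; the consumer block is then
 * updated so the completion index matches the producer index.
 */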
static void dfx_xmt_flush(DFX_board_t *bp)
    {
    u32 prod_cons;
    XMT_DRIVER_DESCR *p_xmt_drv_descr;
    u8 comp;

    /* Flush all outstanding transmit frames */

    while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
        {
        /* Get the driver descriptor for the next queued frame */

        p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

        /* Unmap the DMA buffer and free the skb */
        comp = bp->rcv_xmt_reg.index.xmt_comp;
        dma_unmap_single(bp->bus_dev,
                         bp->descr_block_virt->xmt_data[comp].long_1,
                         p_xmt_drv_descr->p_skb->len,
                         DMA_TO_DEVICE);
        dev_kfree_skb(p_xmt_drv_descr->p_skb);

        /* Flushed frames are counted as transmit discards */

        bp->xmt_discards++;

        /* Move on to the next queued frame */

        bp->rcv_xmt_reg.index.xmt_comp += 1;
        }

    /* Update the transmit index in the consumer block to match the producer */

    prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
    prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
    bp->cons_block_virt->xmt_rcv_data = prod_cons;
    }
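
/*
 * dfx_unregister - tear down one adapter instance
 *
 * Unregisters the net device, frees the coherent descriptor and
 * consumer block allocation, uninitializes the bus-specific state,
 * releases the I/O or memory-mapped register region and, for PCI
 * boards, disables the device before freeing the net_device structure.
 */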
static void __devexit dfx_unregister(struct device *bdev)
{
    struct net_device *dev = dev_get_drvdata(bdev);
    DFX_board_t *bp = netdev_priv(dev);
    int dfx_bus_pci = DFX_BUS_PCI(bdev);
    int dfx_bus_tc = DFX_BUS_TC(bdev);
    int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
    resource_size_t bar_start = 0;      /* start of the register BAR */
    resource_size_t bar_len = 0;        /* length of the register BAR */
    int alloc_size;                     /* size of the coherent DMA block */

    unregister_netdev(dev);

    alloc_size = sizeof(PI_DESCR_BLOCK) +
                 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
                 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
                 sizeof(PI_CONSUMER_BLOCK) +
                 (PI_ALIGN_K_DESC_BLK - 1);
    if (bp->kmalloced)
        dma_free_coherent(bdev, alloc_size,
                          bp->kmalloced, bp->kmalloced_dma);

    dfx_bus_uninit(dev);

    dfx_get_bars(bdev, &bar_start, &bar_len);
    if (dfx_use_mmio) {
        iounmap(bp->base.mem);
        release_mem_region(bar_start, bar_len);
    } else
        release_region(bar_start, bar_len);

    if (dfx_bus_pci)
        pci_disable_device(to_pci_dev(bdev));

    free_netdev(dev);
}

static int __devinit __maybe_unused dfx_dev_register(struct device *);
static int __devexit __maybe_unused dfx_dev_unregister(struct device *);

#ifdef CONFIG_PCI
static int __devinit dfx_pci_register(struct pci_dev *,
                                      const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);

static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
    { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
    { }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
    .name       = "defxx",
    .id_table   = dfx_pci_table,
    .probe      = dfx_pci_register,
    .remove     = __devexit_p(dfx_pci_unregister),
};

static __devinit int dfx_pci_register(struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
{
    return dfx_register(&pdev->dev);
}

static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
{
    dfx_unregister(&pdev->dev);
}
#endif

#ifdef CONFIG_EISA
static struct eisa_device_id dfx_eisa_table[] = {
    { "DEC3001", DEFEA_PROD_ID_1 },
    { "DEC3002", DEFEA_PROD_ID_2 },
    { "DEC3003", DEFEA_PROD_ID_3 },
    { "DEC3004", DEFEA_PROD_ID_4 },
    { }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

static struct eisa_driver dfx_eisa_driver = {
    .id_table   = dfx_eisa_table,
    .driver     = {
        .name   = "defxx",
        .bus    = &eisa_bus_type,
        .probe  = dfx_dev_register,
        .remove = __devexit_p(dfx_dev_unregister),
    },
};
#endif

#ifdef CONFIG_TC
static struct tc_device_id const dfx_tc_table[] = {
    { "DEC ", "PMAF-FA " },
    { "DEC ", "PMAF-FD " },
    { "DEC ", "PMAF-FS " },
    { "DEC ", "PMAF-FU " },
    { }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

static struct tc_driver dfx_tc_driver = {
    .id_table   = dfx_tc_table,
    .driver     = {
        .name   = "defxx",
        .bus    = &tc_bus_type,
        .probe  = dfx_dev_register,
        .remove = __devexit_p(dfx_dev_unregister),
    },
};
#endif
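
/*
 * Bus-generic probe/remove glue used by the EISA and TC bus drivers:
 * hold a reference on the device while it is bound and forward the call
 * to dfx_register()/dfx_unregister().
 */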
static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
{
    int status;

    status = dfx_register(dev);
    if (!status)
        get_device(dev);
    return status;
}

static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
{
    put_device(dev);
    dfx_unregister(dev);
    return 0;
}
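
/*
 * Module entry points: dfx_init() registers the PCI, EISA and TC
 * drivers and dfx_cleanup() unregisters them in reverse order.
 */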
static int __devinit dfx_init(void)
{
    int status;

    status = pci_register_driver(&dfx_pci_driver);
    if (status)
        return status;
    status = eisa_driver_register(&dfx_eisa_driver);
    if (status)
        goto err_eisa;
    status = tc_register_driver(&dfx_tc_driver);
    if (status)
        goto err_tc;
    return 0;

err_tc:     /* unwind partial registration on failure */
    eisa_driver_unregister(&dfx_eisa_driver);
err_eisa:
    pci_unregister_driver(&dfx_pci_driver);
    return status;
}

static void __devexit dfx_cleanup(void)
{
    tc_unregister_driver(&dfx_tc_driver);
    eisa_driver_unregister(&dfx_eisa_driver);
    pci_unregister_driver(&dfx_pci_driver);
}

module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
                   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");