1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202#include <linux/bitops.h>
203#include <linux/compiler.h>
204#include <linux/delay.h>
205#include <linux/dma-mapping.h>
206#include <linux/eisa.h>
207#include <linux/errno.h>
208#include <linux/fddidevice.h>
209#include <linux/init.h>
210#include <linux/interrupt.h>
211#include <linux/ioport.h>
212#include <linux/kernel.h>
213#include <linux/module.h>
214#include <linux/netdevice.h>
215#include <linux/pci.h>
216#include <linux/skbuff.h>
217#include <linux/slab.h>
218#include <linux/string.h>
219#include <linux/tc.h>
220
221#include <asm/byteorder.h>
222#include <asm/io.h>
223
224#include "defxx.h"
225
226
227#define DRV_NAME "defxx"
228#define DRV_VERSION "v1.10"
229#define DRV_RELDATE "2006/12/14"
230
231static char version[] __devinitdata =
232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 " Lawrence V. Stefani and others\n";
234
235#define DYNAMIC_BUFFERS 1
236
237#define SKBUFF_RX_COPYBREAK 200
238
239
240
241
242#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
243
244#ifdef CONFIG_PCI
245#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
246#else
247#define DFX_BUS_PCI(dev) 0
248#endif
249
250#ifdef CONFIG_EISA
251#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
252#else
253#define DFX_BUS_EISA(dev) 0
254#endif
255
256#ifdef CONFIG_TC
257#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
258#else
259#define DFX_BUS_TC(dev) 0
260#endif
261
262#ifdef CONFIG_DEFXX_MMIO
263#define DFX_MMIO 1
264#else
265#define DFX_MMIO 0
266#endif
267
268
269
270static void dfx_bus_init(struct net_device *dev);
271static void dfx_bus_uninit(struct net_device *dev);
272static void dfx_bus_config_check(DFX_board_t *bp);
273
274static int dfx_driver_init(struct net_device *dev,
275 const char *print_name,
276 resource_size_t bar_start);
277static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
278
279static int dfx_open(struct net_device *dev);
280static int dfx_close(struct net_device *dev);
281
282static void dfx_int_pr_halt_id(DFX_board_t *bp);
283static void dfx_int_type_0_process(DFX_board_t *bp);
284static void dfx_int_common(struct net_device *dev);
285static irqreturn_t dfx_interrupt(int irq, void *dev_id);
286
287static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
288static void dfx_ctl_set_multicast_list(struct net_device *dev);
289static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
290static int dfx_ctl_update_cam(DFX_board_t *bp);
291static int dfx_ctl_update_filters(DFX_board_t *bp);
292
293static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
294static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
295static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
296static int dfx_hw_adap_state_rd(DFX_board_t *bp);
297static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
298
299static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
300static void dfx_rcv_queue_process(DFX_board_t *bp);
301static void dfx_rcv_flush(DFX_board_t *bp);
302
303static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
304 struct net_device *dev);
305static int dfx_xmt_done(DFX_board_t *bp);
306static void dfx_xmt_flush(DFX_board_t *bp);
307
308
309
310static struct pci_driver dfx_pci_driver;
311static struct eisa_driver dfx_eisa_driver;
312static struct tc_driver dfx_tc_driver;
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
/*
 * Write a 32-bit value to an MMIO-mapped PDQ register.
 * The barrier after the write keeps the posted write ordered with
 * respect to subsequent register accesses.
 */
static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}
370
/* Write a 32-bit value to a port-I/O mapped PDQ register. */
static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}
375
376static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
377{
378 struct device __maybe_unused *bdev = bp->bus_dev;
379 int dfx_bus_tc = DFX_BUS_TC(bdev);
380 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
381
382 if (dfx_use_mmio)
383 dfx_writel(bp, offset, data);
384 else
385 dfx_outl(bp, offset, data);
386}
387
388
/*
 * Read a 32-bit value from an MMIO-mapped PDQ register.
 * The barrier before the read orders it against preceding accesses.
 */
static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}
394
/* Read a 32-bit value from a port-I/O mapped PDQ register. */
static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}
399
400static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
401{
402 struct device __maybe_unused *bdev = bp->bus_dev;
403 int dfx_bus_tc = DFX_BUS_TC(bdev);
404 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
405
406 if (dfx_use_mmio)
407 dfx_readl(bp, offset, data);
408 else
409 dfx_inl(bp, offset, data);
410}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
/*
 * Determine the bus address and length of the CSR resource for this
 * board.  For PCI the BAR number depends on whether MMIO or port I/O
 * is in use; for EISA the decode range is read back from the ESIC
 * address comparator/mask registers (MMIO case) or taken from the
 * slot's I/O base; for TURBOchannel the CSRs live at a fixed offset
 * into the slot address space.
 */
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;	/* BAR 0 = MMIO, BAR 1 = I/O */

		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar;

		if (dfx_use_mmio) {
			/* Reassemble the memory decode address from the
			 * three MEM_ADD_CMP byte registers, high byte
			 * first, then shift into final position. */
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
			bar <<= 16;
			*bar_start = bar;
			/* Likewise for the decode mask; region length is
			 * mask + 1 once the fixed low bits are OR-ed in. */
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
			bar <<= 16;
			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
		} else {
			*bar_start = base_addr;
			*bar_len = PI_ESIC_K_CSR_IO_LEN;
		}
	}
	if (dfx_bus_tc) {
		*bar_start = to_tc_dev(bdev)->resource.start +
			     PI_TC_K_CSR_OFFSET;
		*bar_len = PI_TC_K_CSR_LEN;
	}
}
480
/* net_device callbacks shared by all bus variants of the driver. */
static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_multicast_list	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518static int __devinit dfx_register(struct device *bdev)
519{
520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
522 int dfx_bus_tc = DFX_BUS_TC(bdev);
523 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
524 const char *print_name = dev_name(bdev);
525 struct net_device *dev;
526 DFX_board_t *bp;
527 resource_size_t bar_start = 0;
528 resource_size_t bar_len = 0;
529 int alloc_size;
530 struct resource *region;
531 int err = 0;
532
533 if (!version_disp) {
534 version_disp = 1;
535 printk(version);
536 }
537
538 dev = alloc_fddidev(sizeof(*bp));
539 if (!dev) {
540 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
541 print_name);
542 return -ENOMEM;
543 }
544
545
546 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
547 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
548 print_name);
549 goto err_out;
550 }
551
552 SET_NETDEV_DEV(dev, bdev);
553
554 bp = netdev_priv(dev);
555 bp->bus_dev = bdev;
556 dev_set_drvdata(bdev, dev);
557
558 dfx_get_bars(bdev, &bar_start, &bar_len);
559
560 if (dfx_use_mmio)
561 region = request_mem_region(bar_start, bar_len, print_name);
562 else
563 region = request_region(bar_start, bar_len, print_name);
564 if (!region) {
565 printk(KERN_ERR "%s: Cannot reserve I/O resource "
566 "0x%lx @ 0x%lx, aborting\n",
567 print_name, (long)bar_len, (long)bar_start);
568 err = -EBUSY;
569 goto err_out_disable;
570 }
571
572
573 if (dfx_use_mmio) {
574 bp->base.mem = ioremap_nocache(bar_start, bar_len);
575 if (!bp->base.mem) {
576 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
577 err = -ENOMEM;
578 goto err_out_region;
579 }
580 } else {
581 bp->base.port = bar_start;
582 dev->base_addr = bar_start;
583 }
584
585
586 dev->netdev_ops = &dfx_netdev_ops;
587
588 if (dfx_bus_pci)
589 pci_set_master(to_pci_dev(bdev));
590
591 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
592 err = -ENODEV;
593 goto err_out_unmap;
594 }
595
596 err = register_netdev(dev);
597 if (err)
598 goto err_out_kfree;
599
600 printk("%s: registered as %s\n", print_name, dev->name);
601 return 0;
602
603err_out_kfree:
604 alloc_size = sizeof(PI_DESCR_BLOCK) +
605 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
606#ifndef DYNAMIC_BUFFERS
607 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
608#endif
609 sizeof(PI_CONSUMER_BLOCK) +
610 (PI_ALIGN_K_DESC_BLK - 1);
611 if (bp->kmalloced)
612 dma_free_coherent(bdev, alloc_size,
613 bp->kmalloced, bp->kmalloced_dma);
614
615err_out_unmap:
616 if (dfx_use_mmio)
617 iounmap(bp->base.mem);
618
619err_out_region:
620 if (dfx_use_mmio)
621 release_mem_region(bar_start, bar_len);
622 else
623 release_region(bar_start, bar_len);
624
625err_out_disable:
626 if (dfx_bus_pci)
627 pci_disable_device(to_pci_dev(bdev));
628
629err_out:
630 free_netdev(dev);
631 return err;
632}
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666static void __devinit dfx_bus_init(struct net_device *dev)
667{
668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev;
670 int dfx_bus_pci = DFX_BUS_PCI(bdev);
671 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
672 int dfx_bus_tc = DFX_BUS_TC(bdev);
673 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
674 u8 val;
675
676 DBG_printk("In dfx_bus_init...\n");
677
678
679 bp->dev = dev;
680
681
682
683 if (dfx_bus_tc)
684 dev->irq = to_tc_dev(bdev)->interrupt;
685 if (dfx_bus_eisa) {
686 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
687
688
689 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
690 val &= PI_CONFIG_STAT_0_M_IRQ;
691 val >>= PI_CONFIG_STAT_0_V_IRQ;
692
693 switch (val) {
694 case PI_CONFIG_STAT_0_IRQ_K_9:
695 dev->irq = 9;
696 break;
697
698 case PI_CONFIG_STAT_0_IRQ_K_10:
699 dev->irq = 10;
700 break;
701
702 case PI_CONFIG_STAT_0_IRQ_K_11:
703 dev->irq = 11;
704 break;
705
706 case PI_CONFIG_STAT_0_IRQ_K_15:
707 dev->irq = 15;
708 break;
709 }
710
711
712
713
714
715
716
717
718
719
720
721
722
723 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
724 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val);
725 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0);
726 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val);
727 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0);
728 val = PI_ESIC_K_CSR_IO_LEN - 1;
729 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff);
730 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff);
731 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff);
732 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff);
733
734
735 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
736 if (dfx_use_mmio)
737 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
738 outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val);
739
740
741
742
743
744 val = PI_SLOT_CNTRL_M_ENB;
745 outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val);
746
747
748
749
750
751 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
752 if (dfx_use_mmio)
753 val |= PI_BURST_HOLDOFF_V_MEM_MAP;
754 else
755 val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
756 outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val);
757
758
759 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
760 val |= PI_CONFIG_STAT_0_M_INT_ENB;
761 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
762 }
763 if (dfx_bus_pci) {
764 struct pci_dev *pdev = to_pci_dev(bdev);
765
766
767
768 dev->irq = pdev->irq;
769
770
771
772 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
773 if (val < PFI_K_LAT_TIMER_MIN) {
774 val = PFI_K_LAT_TIMER_DEF;
775 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
776 }
777
778
779 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
780 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
781 }
782}
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812static void __devexit dfx_bus_uninit(struct net_device *dev)
813{
814 DFX_board_t *bp = netdev_priv(dev);
815 struct device *bdev = bp->bus_dev;
816 int dfx_bus_pci = DFX_BUS_PCI(bdev);
817 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
818 u8 val;
819
820 DBG_printk("In dfx_bus_uninit...\n");
821
822
823
824 if (dfx_bus_eisa) {
825 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
826
827
828 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
829 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
830 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
831 }
832 if (dfx_bus_pci) {
833
834 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
835 }
836}
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869static void __devinit dfx_bus_config_check(DFX_board_t *bp)
870{
871 struct device __maybe_unused *bdev = bp->bus_dev;
872 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
873 int status;
874 u32 host_data;
875
876 DBG_printk("In dfx_bus_config_check...\n");
877
878
879
880 if (dfx_bus_eisa) {
881
882
883
884
885
886
887
888 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
889
890
891
892
893 status = dfx_hw_port_ctrl_req(bp,
894 PI_PCTRL_M_SUB_CMD,
895 PI_SUB_CMD_K_PDQ_REV_GET,
896 0,
897 &host_data);
898 if ((status != DFX_K_SUCCESS) || (host_data == 2))
899 {
900
901
902
903
904
905
906
907
908 switch (bp->burst_size)
909 {
910 case PI_PDATA_B_DMA_BURST_SIZE_32:
911 case PI_PDATA_B_DMA_BURST_SIZE_16:
912 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
913 break;
914
915 default:
916 break;
917 }
918
919
920
921 bp->full_duplex_enb = PI_SNMP_K_FALSE;
922 }
923 }
924 }
925 }
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
/*
 * One-time, board-independent driver initialization: runs the bus
 * setup, seeds the configurable parameters, resets the adapter, reads
 * the factory MAC address, and allocates a single DMA-coherent block
 * that is then carved into the descriptor block, command request and
 * response buffers, (statically allocated) receive buffers, and the
 * consumer block.
 *
 * Returns DFX_K_SUCCESS or DFX_K_FAILURE; on failure the caller
 * (dfx_register) releases bp->kmalloced if it was set.
 */
static int __devinit dfx_driver_init(struct net_device *dev,
				     const char *print_name,
				     resource_size_t bar_start)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	int alloc_size;			/* total bytes for the shared DMA block */
	char *top_v, *curr_v;		/* virtual cursors into the DMA block */
	dma_addr_t top_p, curr_p;	/* bus-address cursors into the DMA block */
	u32 data;			/* value returned by port-control requests */
	__le32 le32;
	char *board_name = NULL;

	DBG_printk("In dfx_driver_init...\n");

	/* Initialize bus-specific hardware registers (IRQ, decoders). */
	dfx_bus_init(dev);

	/* Default values for the configurable parameters; may be
	 * restricted below by dfx_bus_config_check(). */
	bp->full_duplex_enb = PI_SNMP_K_FALSE;
	bp->req_ttrt = 8 * 12500;		/* requested TTRT: 8ms in 80ns units */
	bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
	bp->rcv_bufs_to_post = RCV_BUFS_DEF;

	/* Apply bus/revision-specific restrictions (DEFEA rev 2). */
	dfx_bus_config_check(bp);

	/* Disable PDQ interrupts first. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting it. */
	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/* Read the factory MAC address: low 4 bytes then high 2 bytes;
	 * the adapter returns it in little-endian order. */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return(DFX_K_FAILURE);
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return(DFX_K_FAILURE);
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));

	/* Start with the factory address as the current address and
	 * announce the board. */
	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
	if (dfx_bus_tc)
		board_name = "DEFTA";
	if (dfx_bus_eisa)
		board_name = "DEFEA";
	if (dfx_bus_pci)
		board_name = "DEFPA";
	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
		"Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
		(long long)bar_start, dev->irq,
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* Allocate one contiguous DMA-coherent block for all shared
	 * structures, with slack for descriptor-block alignment. */
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX +
		     PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
						   &bp->kmalloced_dma,
						   GFP_ATOMIC);
	if (top_v == NULL) {
		printk("%s: Could not allocate memory for host buffers "
		       "and structures!\n", print_name);
		return(DFX_K_FAILURE);
	}
	memset(top_v, 0, alloc_size);	/* clear the block before carving it up */
	top_p = bp->kmalloced_dma;	/* bus address of the block */

	/* Align the descriptor block on the boundary the PDQ requires;
	 * advance the virtual cursor by the same amount. */
	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
	curr_v = top_v + (curr_p - top_p);

	/* Reserve the descriptor block. */
	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
	bp->descr_block_phys = curr_p;
	curr_v += sizeof(PI_DESCR_BLOCK);
	curr_p += sizeof(PI_DESCR_BLOCK);

	/* Reserve the command request buffer. */
	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
	bp->cmd_req_phys = curr_p;
	curr_v += PI_CMD_REQ_K_SIZE_MAX;
	curr_p += PI_CMD_REQ_K_SIZE_MAX;

	/* Reserve the command response buffer. */
	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
	bp->cmd_rsp_phys = curr_p;
	curr_v += PI_CMD_RSP_K_SIZE_MAX;
	curr_p += PI_CMD_RSP_K_SIZE_MAX;

	/* Reserve the receive buffer area; space is only carved out of
	 * this block when receive buffers are statically allocated. */
	bp->rcv_block_virt = curr_v;
	bp->rcv_block_phys = curr_p;

#ifndef DYNAMIC_BUFFERS
	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
#endif

	/* Reserve the consumer block. */
	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
	bp->cons_block_phys = curr_p;

	/* Display the carved-up addresses when the debug driver is built. */
	DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
		   print_name,
		   (long)bp->descr_block_virt, bp->descr_block_phys);
	DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
	DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
	DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cons_block_virt, bp->cons_block_phys);

	return(DFX_K_SUCCESS);
}
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
/*
 * Bring the adapter from reset to the LINK_AVAILABLE state: reset the
 * PDQ, program the burst size, hand it the consumer and descriptor
 * block addresses, set the runtime characteristics and filters,
 * (re)post receive buffers, issue the START command and finally enable
 * interrupts.  The hardware command sequence below is order-critical.
 *
 * get_buffers: nonzero to (re)allocate receive buffers; zero on an
 * error-recovery reset where the existing buffers are kept.
 * Returns DFX_K_SUCCESS or DFX_K_FAILURE.
 */
static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
{
	DBG_printk("In dfx_adap_init...\n");

	/* Disable PDQ interrupts first. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting it;
	 * reset_type selects whether self-test is skipped. */
	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
	{
		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Acknowledge any Type 0 interrupts left pending by the reset. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);

	/* Reset the host's shadow copies of the ring producer/completion
	 * indices to match the freshly reset adapter. */
	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear the consumer block before entering DMA_AVAILABLE state. */
	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Program the DMA burst size. */
	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_SUB_CMD,
				 PI_SUB_CMD_K_BURST_SIZE_SET,
				 bp->burst_size,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Tell the adapter where the consumer block lives. */
	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_CONS_BLOCK,
				 bp->cons_block_phys,
				 0,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set consumer block address!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Hand over the descriptor block address; the BSWAP_INIT flag
	 * selects the byte-swapping mode the adapter will use for DMA. */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
				 (u32)(bp->descr_block_phys |
				       PI_PDATA_A_INIT_M_BSWAP_INIT),
				 0, NULL) != DFX_K_SUCCESS) {
		printk("%s: Could not set descriptor block address!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set the transmit flush timeout (in 80ms ticks). */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
	bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
	bp->cmd_req_virt->char_set.item[0].value = 3;
	bp->cmd_req_virt->char_set.item[0].item_index = 0;
	bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Set the SNMP parameters: full-duplex enable and requested TTRT. */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
	bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
	bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
	bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
	bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
	bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Load the adapter CAM with the current address table. */
	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Load the adapter filters (promiscuous/group state). */
	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter filters update failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Release any existing dynamic receive buffers before the ring
	 * is (re)initialized with fresh ones. */
	if (get_buffers)
		dfx_rcv_flush(bp);

	/* Initialize the receive descriptor block and post buffers. */
	if (dfx_rcv_init(bp, get_buffers))
	{
		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return(DFX_K_FAILURE);
	}

	/* Issue the START command to leave the DMA_AVAILABLE state. */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Start command failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return(DFX_K_FAILURE);
	}

	/* Initialization succeeded: enable PDQ interrupts. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
	return(DFX_K_SUCCESS);
}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385static int dfx_open(struct net_device *dev)
1386{
1387 DFX_board_t *bp = netdev_priv(dev);
1388 int ret;
1389
1390 DBG_printk("In dfx_open...\n");
1391
1392
1393
1394 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1395 dev);
1396 if (ret) {
1397 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1398 return ret;
1399 }
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1413
1414
1415
1416 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1417 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1418 bp->uc_count = 0;
1419 bp->mc_count = 0;
1420
1421
1422
1423 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1424 bp->group_prom = PI_FSTATE_K_BLOCK;
1425
1426 spin_lock_init(&bp->lock);
1427
1428
1429
1430 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;
1431 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1432 {
1433 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1434 free_irq(dev->irq, dev);
1435 return -EAGAIN;
1436 }
1437
1438
1439 netif_start_queue(dev);
1440 return(0);
1441}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
/*
 * Close the interface ("ifconfig down"): disable adapter interrupts,
 * reset the adapter, return all queued transmit and receive buffers,
 * clear the software ring state, stop the queue and release the IRQ.
 * The teardown order matters — the adapter is silenced before any
 * buffers are reclaimed, and the IRQ is freed last.
 */
static int dfx_close(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);

	DBG_printk("In dfx_close...\n");

	/* Disable PDQ interrupts first. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting it. */
	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/* Flush any pending transmit buffers; the adapter is already
	 * reset, so they can only be returned to the stack. */
	dfx_xmt_flush(bp);

	/* Reset the host's shadow copies of the ring indices. */
	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear the consumer block for the next adapter bring-up. */
	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Release all dynamically allocated receive buffers. */
	dfx_rcv_flush(bp);

	/* Stop the transmit queue. */
	netif_stop_queue(dev);

	/* Release the interrupt line. */
	free_irq(dev->irq, dev);

	return(0);
}
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563static void dfx_int_pr_halt_id(DFX_board_t *bp)
1564 {
1565 PI_UINT32 port_status;
1566 PI_UINT32 halt_id;
1567
1568
1569
1570 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1571
1572
1573
1574 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1575 switch (halt_id)
1576 {
1577 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1578 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1579 break;
1580
1581 case PI_HALT_ID_K_PARITY_ERROR:
1582 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1583 break;
1584
1585 case PI_HALT_ID_K_HOST_DIR_HALT:
1586 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1587 break;
1588
1589 case PI_HALT_ID_K_SW_FAULT:
1590 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1591 break;
1592
1593 case PI_HALT_ID_K_HW_FAULT:
1594 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1595 break;
1596
1597 case PI_HALT_ID_K_PC_TRACE:
1598 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1599 break;
1600
1601 case PI_HALT_ID_K_DMA_ERROR:
1602 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1603 break;
1604
1605 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1606 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1607 break;
1608
1609 case PI_HALT_ID_K_BUS_EXCEPTION:
1610 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1611 break;
1612
1613 default:
1614 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1615 break;
1616 }
1617 }
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
/*
 * Service Type 0 (non-ring) interrupts: fatal bus errors and a HALTED
 * adapter state trigger a full adapter reset (without re-allocating
 * receive buffers); a transmit-flush request returns queued packets to
 * the stack and acknowledges the flush; a LINK_AVAIL transition marks
 * the link usable again.
 */
static void dfx_int_type_0_process(DFX_board_t *bp)

{
	PI_UINT32 type_0_status;	/* pending Type 0 interrupt bits */
	PI_UINT32 state;		/* latest adapter state */

	/* Read the pending Type 0 interrupts, then acknowledge them all
	 * at once by writing the same bits back. */
	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);

	/* Fatal bus errors: NXM, packet memory parity, host bus parity. */
	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
			     PI_TYPE_0_STAT_M_PM_PAR_ERR |
			     PI_TYPE_0_STAT_M_BUS_PAR_ERR))
	{
		/* Report each error condition individually. */
		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);

		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
			printk("%s: Packet Memory Parity Error\n", bp->dev->name);

		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
			printk("%s: Host Bus Parity Error\n", bp->dev->name);

		/* Reset the adapter; reset_type 0 re-runs the on-board
		 * diagnostics.  The link is down until reset completes. */
		bp->link_available = PI_K_FALSE;
		bp->reset_type = 0;
		printk("%s: Resetting adapter...\n", bp->dev->name);
		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
		{
			printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
			return;
		}
		printk("%s: Adapter reset successful!\n", bp->dev->name);
		return;
	}

	/* Transmit-flush request: return queued packets to the stack,
	 * then acknowledge the flush to the adapter. */
	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
	{
		bp->link_available = PI_K_FALSE;
		dfx_xmt_flush(bp);
		(void) dfx_hw_port_ctrl_req(bp,
					    PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
					    0,
					    0,
					    NULL);
	}

	/* Adapter state change. */
	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
	{
		state = dfx_hw_adap_state_rd(bp);
		if (state == PI_STATE_K_HALTED)
		{
			/* The adapter halted itself: report why, then
			 * reset it with diagnostics. */
			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
			dfx_int_pr_halt_id(bp);

			bp->link_available = PI_K_FALSE;
			bp->reset_type = 0;
			printk("%s: Resetting adapter...\n", bp->dev->name);
			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
			{
				printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
				return;
			}
			printk("%s: Adapter reset successful!\n", bp->dev->name);
		}
		else if (state == PI_STATE_K_LINK_AVAIL)
		{
			bp->link_available = PI_K_TRUE;	/* link is usable again */
		}
	}
}
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
/*
 * Common interrupt service routine shared by all bus variants.  The
 * caller holds bp->lock and has already masked interrupts at the bus
 * interface where the hardware allows it.
 */
static void dfx_int_common(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	PI_UINT32 port_status;	/* PORT STATUS register value */

	/* Reclaim completed transmit packets; wake the queue if any
	 * descriptors were freed. */
	if(dfx_xmt_done(bp))
		netif_wake_queue(dev);

	/* Service received LLC frames. */
	dfx_rcv_queue_process(bp);

	/* Transmit and receive producer indices share one register, so
	 * a single write updates both. */
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);

	/* Check whether an (infrequent) Type 0 interrupt is pending. */
	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
		dfx_int_type_0_process(bp);
}
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1883{
1884 struct net_device *dev = dev_id;
1885 DFX_board_t *bp = netdev_priv(dev);
1886 struct device *bdev = bp->bus_dev;
1887 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1888 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1889 int dfx_bus_tc = DFX_BUS_TC(bdev);
1890
1891
1892
1893 if (dfx_bus_pci) {
1894 u32 status;
1895
1896 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1897 if (!(status & PFI_STATUS_M_PDQ_INT))
1898 return IRQ_NONE;
1899
1900 spin_lock(&bp->lock);
1901
1902
1903 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1904 PFI_MODE_M_DMA_ENB);
1905
1906
1907 dfx_int_common(dev);
1908
1909
1910 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1911 PFI_STATUS_M_PDQ_INT);
1912 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1913 (PFI_MODE_M_PDQ_INT_ENB |
1914 PFI_MODE_M_DMA_ENB));
1915
1916 spin_unlock(&bp->lock);
1917 }
1918 if (dfx_bus_eisa) {
1919 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1920 u8 status;
1921
1922 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1923 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1924 return IRQ_NONE;
1925
1926 spin_lock(&bp->lock);
1927
1928
1929 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1930 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1931
1932
1933 dfx_int_common(dev);
1934
1935
1936 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1937 status |= PI_CONFIG_STAT_0_M_INT_ENB;
1938 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1939
1940 spin_unlock(&bp->lock);
1941 }
1942 if (dfx_bus_tc) {
1943 u32 status;
1944
1945 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1946 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1947 PI_PSTATUS_M_XMT_DATA_PENDING |
1948 PI_PSTATUS_M_SMT_HOST_PENDING |
1949 PI_PSTATUS_M_UNSOL_PENDING |
1950 PI_PSTATUS_M_CMD_RSP_PENDING |
1951 PI_PSTATUS_M_CMD_REQ_PENDING |
1952 PI_PSTATUS_M_TYPE_0_PENDING)))
1953 return IRQ_NONE;
1954
1955 spin_lock(&bp->lock);
1956
1957
1958 dfx_int_common(dev);
1959
1960 spin_unlock(&bp->lock);
1961 }
1962
1963 return IRQ_HANDLED;
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
/*
 * dfx_ctl_get_stats - return interface statistics
 *
 * Fills bp->stats from three sources in order: counters the driver keeps
 * itself, the adapter's SMT MIB (PI_CMD_K_SMT_MIB_GET), and the adapter's
 * hardware counters (PI_CMD_K_CNTRS_GET).  If either DMA command request
 * fails, whatever has been gathered up to that point is returned.
 */
static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
	{
	DFX_board_t *bp = netdev_priv(dev);

	/* Fill the generic netdev counters from driver-maintained totals. */

	bp->stats.gen.rx_packets = bp->rcv_total_frames;
	bp->stats.gen.tx_packets = bp->xmt_total_frames;
	bp->stats.gen.rx_bytes   = bp->rcv_total_bytes;
	bp->stats.gen.tx_bytes   = bp->xmt_total_bytes;
	bp->stats.gen.rx_errors  = bp->rcv_crc_errors +
				   bp->rcv_frame_status_errors +
				   bp->rcv_length_errors;
	bp->stats.gen.tx_errors  = bp->xmt_length_errors;
	bp->stats.gen.rx_dropped = bp->rcv_discards;
	bp->stats.gen.tx_dropped = bp->xmt_discards;
	bp->stats.gen.multicast  = bp->rcv_multicast_frames;
	bp->stats.gen.collisions = 0;		/* always zero (FDDI) */

	/* Get FDDI SMT MIB objects from the adapter via a DMA command. */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return((struct net_device_stats *) &bp->stats);

	/* Copy the SMT MIB objects from the command response buffer. */

	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
	bp->stats.smt_op_version_id			= bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
	bp->stats.smt_hi_version_id			= bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
	bp->stats.smt_lo_version_id			= bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
	bp->stats.smt_mib_version_id		= bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
	bp->stats.smt_mac_cts				= bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
	bp->stats.smt_non_master_cts		= bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
	bp->stats.smt_master_cts			= bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
	bp->stats.smt_available_paths		= bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
	bp->stats.smt_config_capabilities	= bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
	bp->stats.smt_config_policy			= bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
	bp->stats.smt_connection_policy		= bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
	bp->stats.smt_t_notify				= bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
	bp->stats.smt_stat_rpt_policy		= bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
	bp->stats.smt_trace_max_expiration	= bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
	bp->stats.smt_bypass_present		= bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
	bp->stats.smt_ecm_state				= bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
	bp->stats.smt_cf_state				= bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
	bp->stats.smt_remote_disconnect_flag	= bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
	bp->stats.smt_station_status		= bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
	bp->stats.smt_peer_wrap_flag		= bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
	/* Only the low 32 bits (.ls) of the 64-bit timestamps are kept. */
	bp->stats.smt_time_stamp			= bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
	bp->stats.smt_transition_time_stamp	= bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
	bp->stats.mac_frame_status_functions	= bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
	bp->stats.mac_t_max_capability		= bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
	bp->stats.mac_tvx_capability		= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
	bp->stats.mac_available_paths		= bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
	bp->stats.mac_current_path			= bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
	bp->stats.mac_dup_address_test		= bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
	bp->stats.mac_requested_paths		= bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
	bp->stats.mac_downstream_port_type	= bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
	bp->stats.mac_t_req					= bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
	bp->stats.mac_t_neg					= bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
	bp->stats.mac_t_max					= bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
	bp->stats.mac_tvx_value				= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
	bp->stats.mac_frame_error_threshold	= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
	bp->stats.mac_frame_error_ratio		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
	bp->stats.mac_rmt_state				= bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
	bp->stats.mac_da_flag				= bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
	bp->stats.mac_una_da_flag			= bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
	bp->stats.mac_frame_error_flag		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
	bp->stats.mac_ma_unitdata_available	= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
	bp->stats.mac_hardware_present		= bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
	bp->stats.mac_ma_unitdata_enable	= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
	bp->stats.path_tvx_lower_bound		= bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
	bp->stats.path_t_max_lower_bound	= bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
	bp->stats.path_max_t_req			= bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
	/* Per-PHY (port) objects: index 0 = PHY A, index 1 = PHY B. */
	bp->stats.port_my_type[0]			= bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
	bp->stats.port_my_type[1]			= bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
	bp->stats.port_neighbor_type[0]		= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
	bp->stats.port_neighbor_type[1]		= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
	bp->stats.port_connection_policies[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
	bp->stats.port_connection_policies[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
	bp->stats.port_mac_indicated[0]		= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
	bp->stats.port_mac_indicated[1]		= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
	bp->stats.port_current_path[0]		= bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
	bp->stats.port_current_path[1]		= bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
	/* requested_paths entries are 3 bytes each, hence raw memcpy of 3. */
	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
	bp->stats.port_mac_placement[0]		= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
	bp->stats.port_mac_placement[1]		= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
	bp->stats.port_available_paths[0]	= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
	bp->stats.port_available_paths[1]	= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
	bp->stats.port_pmd_class[0]			= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
	bp->stats.port_pmd_class[1]			= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
	bp->stats.port_connection_capabilities[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
	bp->stats.port_connection_capabilities[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
	bp->stats.port_bs_flag[0]			= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
	bp->stats.port_bs_flag[1]			= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
	bp->stats.port_ler_estimate[0]		= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
	bp->stats.port_ler_estimate[1]		= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
	bp->stats.port_ler_cutoff[0]		= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
	bp->stats.port_ler_cutoff[1]		= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
	bp->stats.port_ler_alarm[0]			= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
	bp->stats.port_ler_alarm[1]			= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
	bp->stats.port_connect_state[0]		= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
	bp->stats.port_connect_state[1]		= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
	bp->stats.port_pcm_state[0]			= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
	bp->stats.port_pcm_state[1]			= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
	bp->stats.port_pc_withhold[0]		= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
	bp->stats.port_pc_withhold[1]		= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
	bp->stats.port_ler_flag[0]			= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
	bp->stats.port_ler_flag[1]			= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
	bp->stats.port_hardware_present[0]	= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
	bp->stats.port_hardware_present[1]	= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];

	/* Get the hardware counters from the adapter. */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return((struct net_device_stats *) &bp->stats);

	/* Copy the counters (low 32 bits of each 64-bit counter). */

	bp->stats.mac_frame_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
	bp->stats.mac_copied_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
	bp->stats.mac_transmit_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
	bp->stats.mac_error_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
	bp->stats.mac_lost_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
	bp->stats.port_lct_fail_cts[0]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
	bp->stats.port_lct_fail_cts[1]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
	bp->stats.port_lem_reject_cts[0]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
	bp->stats.port_lem_reject_cts[1]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
	bp->stats.port_lem_cts[0]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
	bp->stats.port_lem_cts[1]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;

	return((struct net_device_stats *) &bp->stats);
	}
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197static void dfx_ctl_set_multicast_list(struct net_device *dev)
2198{
2199 DFX_board_t *bp = netdev_priv(dev);
2200 int i;
2201 struct dev_mc_list *dmi;
2202
2203
2204
2205 if (dev->flags & IFF_PROMISC)
2206 bp->ind_group_prom = PI_FSTATE_K_PASS;
2207
2208
2209
2210 else
2211 {
2212 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233 if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2234 {
2235 bp->group_prom = PI_FSTATE_K_PASS;
2236 bp->mc_count = 0;
2237 }
2238 else
2239 {
2240 bp->group_prom = PI_FSTATE_K_BLOCK;
2241 bp->mc_count = dev->mc_count;
2242 }
2243
2244
2245
2246 dmi = dev->mc_list;
2247 for (i=0; i < bp->mc_count; i++)
2248 {
2249 memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
2250 dmi = dmi->next;
2251 }
2252 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2253 {
2254 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2255 }
2256 else
2257 {
2258 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2259 }
2260 }
2261
2262
2263
2264 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2265 {
2266 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2267 }
2268 else
2269 {
2270 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2271 }
2272 }
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2312 {
2313 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2314 DFX_board_t *bp = netdev_priv(dev);
2315
2316
2317
2318 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
2319 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);
2320 bp->uc_count = 1;
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2335 {
2336 bp->group_prom = PI_FSTATE_K_PASS;
2337 bp->mc_count = 0;
2338
2339
2340
2341 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2342 {
2343 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2344 }
2345 else
2346 {
2347 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2348 }
2349 }
2350
2351
2352
2353 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2354 {
2355 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2356 }
2357 else
2358 {
2359 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2360 }
2361 return(0);
2362 }
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398static int dfx_ctl_update_cam(DFX_board_t *bp)
2399 {
2400 int i;
2401 PI_LAN_ADDR *p_addr;
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);
2417 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2418 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2419
2420
2421
2422 for (i=0; i < (int)bp->uc_count; i++)
2423 {
2424 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2425 {
2426 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2427 p_addr++;
2428 }
2429 }
2430
2431
2432
2433 for (i=0; i < (int)bp->mc_count; i++)
2434 {
2435 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2436 {
2437 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2438 p_addr++;
2439 }
2440 }
2441
2442
2443
2444 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2445 return(DFX_K_FAILURE);
2446 return(DFX_K_SUCCESS);
2447 }
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481static int dfx_ctl_update_filters(DFX_board_t *bp)
2482 {
2483 int i = 0;
2484
2485
2486
2487 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2488
2489
2490
2491 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2492 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2493
2494
2495
2496 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2497 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2498
2499
2500
2501 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2502 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2503
2504
2505
2506 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2507
2508
2509
2510 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2511 return(DFX_K_FAILURE);
2512 return(DFX_K_SUCCESS);
2513 }
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
/*
 * dfx_hw_dma_cmd_req - execute one DMA command request/response cycle
 *
 * Posts the command already built in bp->cmd_req_virt to the adapter's
 * command request ring and a response buffer to the command response
 * ring, then busy-waits (up to 20000 * 100us = ~2s per phase) for the
 * adapter to consume the request and produce the response.
 *
 * Returns DFX_K_SUCCESS, DFX_K_OUTSTATE (adapter not in a DMA-capable
 * state) or DFX_K_HW_TIMEOUT.
 */
static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
	{
	int status;		/* adapter state (PI_STATE_K_*) */
	int timeout_cnt;	/* busy-wait iterations remaining */

	/* Commands can only be processed when DMA is available. */

	status = dfx_hw_adap_state_rd(bp);
	if ((status == PI_STATE_K_RESET) ||
		(status == PI_STATE_K_HALTED) ||
		(status == PI_STATE_K_DMA_UNAVAIL) ||
		(status == PI_STATE_K_UPGRADE))
		return(DFX_K_OUTSTATE);

	/* Put response buffer on the command response queue.  The segment
	 * length field is scaled by the buffer alignment granularity. */

	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;

	/* Bump the response producer index (ring size is a power of two)
	 * and tell the adapter. */

	bp->cmd_rsp_reg.index.prod += 1;
	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);

	/* Put request buffer on the command request queue (single segment,
	 * SOP and EOP both set). */

	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;

	/* Bump the request producer index and tell the adapter. */

	bp->cmd_req_reg.index.prod += 1;
	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/* Wait for the adapter's request consumer index to catch up with
	 * our producer index. */

	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/* Request consumed: bump our completion index and republish. */

	bp->cmd_req_reg.index.comp += 1;
	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/* Wait for the adapter to fill our response buffer (its response
	 * consumer index catches up with our producer index). */

	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/* Response received: bump our completion index and republish. */

	bp->cmd_rsp_reg.index.comp += 1;
	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
	return(DFX_K_SUCCESS);
	}
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
/*
 * dfx_hw_port_ctrl_req - issue a command through the PORT CONTROL register
 *
 * Writes data_a/data_b into the port data registers, then writes the
 * command with the CMD_ERROR bit set; the adapter clears that bit when
 * the command completes.  Busy-waits for completion (longer limit for
 * flash programming).  If host_data is non-NULL, the HOST DATA register
 * is read back into it on success.
 *
 * Returns DFX_K_SUCCESS or DFX_K_HW_TIMEOUT.
 */
static int dfx_hw_port_ctrl_req(
	DFX_board_t *bp,
	PI_UINT32	command,
	PI_UINT32	data_a,
	PI_UINT32	data_b,
	PI_UINT32	*host_data
	)

	{
	PI_UINT32 port_cmd;	/* command word; CMD_ERROR acts as busy flag */
	int timeout_cnt;	/* busy-wait iterations remaining */

	/* Set CMD_ERROR along with the command; the adapter clears it on
	 * completion, so it doubles as a completion indicator. */

	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);

	/* Data registers must be written before the control register. */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);

	/* Flash programming takes much longer than ordinary commands. */

	if (command == PI_PCTRL_M_BLAST_FLASH)
		timeout_cnt = 600000;	/* 600000 * 100us = 60s */
	else
		timeout_cnt = 20000;	/* 20000 * 100us = 2s */

	for (; timeout_cnt > 0; timeout_cnt--)
		{
		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
			break;
		udelay(100);
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/* Optionally return the command's result from the HOST DATA
	 * register (only meaningful for commands that produce one). */

	if (host_data != NULL)
		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
	return(DFX_K_SUCCESS);
	}
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
/*
 * dfx_hw_adap_reset - reset the adapter
 *
 * Places the reset type in PORT DATA A, asserts the reset line, holds it
 * for 20 microseconds, then deasserts it.  The adapter re-initializes
 * according to the requested reset type.
 */
static void dfx_hw_adap_reset(
	DFX_board_t *bp,
	PI_UINT32	type
	)

	{
	/* Set the reset type, then assert reset. */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);

	/* Hold reset asserted for at least 20 microseconds. */

	udelay(20);

	/* Deassert reset; the adapter starts its re-initialization. */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
	}
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2803 {
2804 PI_UINT32 port_status;
2805
2806 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2807 return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
2808 }
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2843 {
2844 int timeout_cnt;
2845
2846
2847
2848 dfx_hw_adap_reset(bp, type);
2849
2850
2851
2852 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2853 {
2854 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2855 break;
2856 udelay(100);
2857 }
2858 if (timeout_cnt == 0)
2859 return(DFX_K_HW_TIMEOUT);
2860 return(DFX_K_SUCCESS);
2861 }
2862
2863
2864
2865
2866
2867
2868static void my_skb_align(struct sk_buff *skb, int n)
2869{
2870 unsigned long x = (unsigned long)skb->data;
2871 unsigned long v;
2872
2873 v = ALIGN(x, n);
2874
2875 skb_reserve(skb, v - x);
2876}
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
/*
 * dfx_rcv_init - initialize the receive descriptor ring
 *
 * Fills all PI_RCV_DATA_K_NUM_ENTRIES receive descriptors.  With
 * DYNAMIC_BUFFERS each descriptor gets its own freshly allocated,
 * 128-byte-aligned, DMA-mapped skb; otherwise descriptors share slices
 * of the statically allocated receive block (note the non-dynamic path
 * indexes the block by i only, so entries i, i+rcv_bufs_to_post, ...
 * share the same buffer).  Finally the producer index is published to
 * the adapter.
 *
 * get_buffers is 0 on reopen when buffers from a previous open are kept.
 * Returns 0 on success or -ENOMEM if an skb allocation fails.
 */
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
	{
	int	i, j;		/* i: buffer group, j: stride within the ring */

	/* Walk the ring in strides of rcv_bufs_to_post so that buffer
	 * group i covers entries i, i+stride, i+2*stride, ... */

	if (get_buffers) {
#ifdef DYNAMIC_BUFFERS
	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
			if (!newskb)
				return -ENOMEM;
			/* Descriptor segment length is scaled by the receive
			 * buffer alignment granularity. */
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));

			/* Align the data area before mapping: the adapter
			 * requires 128-byte-aligned receive buffers. */
			my_skb_align(newskb, 128);
			bp->descr_block_virt->rcv_data[i + j].long_1 =
				(u32)dma_map_single(bp->bus_dev, newskb->data,
						    NEW_SKB_SIZE,
						    DMA_FROM_DEVICE);

			/* p_rcv_buff_va stores the skb pointer so the buffer
			 * can be found again at receive/flush time. */
			bp->p_rcv_buff_va[i+j] = (char *) newskb;
		}
#else
	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
			bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
		}
#endif
	}

	/* Publish the receive producer index to the adapter. */

	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	return 0;
	}
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014static void dfx_rcv_queue_process(
3015 DFX_board_t *bp
3016 )
3017
3018 {
3019 PI_TYPE_2_CONSUMER *p_type_2_cons;
3020 char *p_buff;
3021 u32 descr, pkt_len;
3022 struct sk_buff *skb;
3023
3024
3025
3026 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3027 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3028 {
3029
3030
3031 int entry;
3032
3033 entry = bp->rcv_xmt_reg.index.rcv_comp;
3034#ifdef DYNAMIC_BUFFERS
3035 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3036#else
3037 p_buff = (char *) bp->p_rcv_buff_va[entry];
3038#endif
3039 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3040
3041 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3042 {
3043 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3044 bp->rcv_crc_errors++;
3045 else
3046 bp->rcv_frame_status_errors++;
3047 }
3048 else
3049 {
3050 int rx_in_place = 0;
3051
3052
3053
3054 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3055 pkt_len -= 4;
3056 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3057 bp->rcv_length_errors++;
3058 else{
3059#ifdef DYNAMIC_BUFFERS
3060 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3061 struct sk_buff *newskb;
3062
3063 newskb = dev_alloc_skb(NEW_SKB_SIZE);
3064 if (newskb){
3065 rx_in_place = 1;
3066
3067 my_skb_align(newskb, 128);
3068 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3069 dma_unmap_single(bp->bus_dev,
3070 bp->descr_block_virt->rcv_data[entry].long_1,
3071 NEW_SKB_SIZE,
3072 DMA_FROM_DEVICE);
3073 skb_reserve(skb, RCV_BUFF_K_PADDING);
3074 bp->p_rcv_buff_va[entry] = (char *)newskb;
3075 bp->descr_block_virt->rcv_data[entry].long_1 =
3076 (u32)dma_map_single(bp->bus_dev,
3077 newskb->data,
3078 NEW_SKB_SIZE,
3079 DMA_FROM_DEVICE);
3080 } else
3081 skb = NULL;
3082 } else
3083#endif
3084 skb = dev_alloc_skb(pkt_len+3);
3085 if (skb == NULL)
3086 {
3087 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3088 bp->rcv_discards++;
3089 break;
3090 }
3091 else {
3092#ifndef DYNAMIC_BUFFERS
3093 if (! rx_in_place)
3094#endif
3095 {
3096
3097
3098 skb_copy_to_linear_data(skb,
3099 p_buff + RCV_BUFF_K_PADDING,
3100 pkt_len + 3);
3101 }
3102
3103 skb_reserve(skb,3);
3104 skb_put(skb, pkt_len);
3105 skb->protocol = fddi_type_trans(skb, bp->dev);
3106 bp->rcv_total_bytes += skb->len;
3107 netif_rx(skb);
3108
3109
3110 bp->rcv_total_frames++;
3111 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3112 bp->rcv_multicast_frames++;
3113 }
3114 }
3115 }
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125 bp->rcv_xmt_reg.index.rcv_prod += 1;
3126 bp->rcv_xmt_reg.index.rcv_comp += 1;
3127 }
3128 }
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
/*
 * dfx_xmt_queue_pkt - queue one frame for transmission (ndo_start_xmit)
 *
 * Validates the frame length, drops frames while the link is down,
 * prepends the 3-byte packet request header (PRH), maps the buffer for
 * DMA, and posts a single SOP+EOP descriptor to the transmit ring.  If
 * the ring is full the frame is requeued with NETDEV_TX_BUSY.  The queue
 * is stopped on entry and rewoken before returning, so the ring-full
 * window is handled under bp->lock.
 */
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev)
	{
	DFX_board_t		*bp = netdev_priv(dev);
	u8			prod;			/* local copy of the producer index */
	PI_XMT_DESCR		*p_xmt_descr;		/* hardware transmit descriptor */
	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* driver-side descriptor (skb ptr) */
	unsigned long		flags;

	netif_stop_queue(dev);

	/* Reject frames outside the legal LLC length range; the frame is
	 * dropped but "success" is returned so the stack does not retry. */

	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
		{
		printk("%s: Invalid packet length - %u bytes\n",
			dev->name, skb->len);
		bp->xmt_length_errors++;
		netif_wake_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
		}

	/* The cached link state may be stale: re-read the adapter state
	 * once before discarding the frame for lack of a link. */

	if (bp->link_available == PI_K_FALSE)
		{
		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)
			bp->link_available = PI_K_TRUE;
		else
			{
			bp->xmt_discards++;
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
			}
		}

	spin_lock_irqsave(&bp->lock, flags);

	/* Get the current producer index and its hardware descriptor. */

	prod = bp->rcv_xmt_reg.index.xmt_prod;
	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);

	/* Get the matching driver descriptor; note prod is post-
	 * incremented here so it now points at the NEXT free entry,
	 * which is what the ring-full test below needs. */

	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);

	/* Prepend the 3-byte packet request header. */

	skb_push(skb,3);
	skb->data[0] = DFX_PRH0_BYTE;	/* 80 Mbit compatibility */
	skb->data[1] = DFX_PRH1_BYTE;	/* 100 Mbit compatibility */
	skb->data[2] = DFX_PRH2_BYTE;	/* end of PRH */

	/* Build the hardware descriptor: single segment (SOP and EOP),
	 * length includes the PRH, buffer mapped for device reads. */

	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
	p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
						  skb->len, DMA_TO_DEVICE);

	/* Ring full?  (next-free == completion means no room).  Undo the
	 * PRH push and ask the stack to requeue the frame; the queue stays
	 * stopped until dfx_xmt_done() frees descriptors. */

	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
		{
		skb_pull(skb,3);
		spin_unlock_irqrestore(&bp->lock, flags);
		return NETDEV_TX_BUSY;	/* requeue packet for later */
		}

	/* Remember the skb so dfx_xmt_done()/dfx_xmt_flush() can free it. */

	p_xmt_drv_descr->p_skb = skb;

	/* Commit the new producer index and publish it to the adapter. */

	bp->rcv_xmt_reg.index.xmt_prod = prod;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	spin_unlock_irqrestore(&bp->lock, flags);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;	/* packet queued to adapter */
	}
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382static int dfx_xmt_done(DFX_board_t *bp)
3383 {
3384 XMT_DRIVER_DESCR *p_xmt_drv_descr;
3385 PI_TYPE_2_CONSUMER *p_type_2_cons;
3386 u8 comp;
3387 int freed = 0;
3388
3389
3390
3391 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3392 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3393 {
3394
3395
3396 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3397
3398
3399
3400 bp->xmt_total_frames++;
3401 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3402
3403
3404 comp = bp->rcv_xmt_reg.index.xmt_comp;
3405 dma_unmap_single(bp->bus_dev,
3406 bp->descr_block_virt->xmt_data[comp].long_1,
3407 p_xmt_drv_descr->p_skb->len,
3408 DMA_TO_DEVICE);
3409 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422 bp->rcv_xmt_reg.index.xmt_comp += 1;
3423 freed++;
3424 }
3425 return freed;
3426 }
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455#ifdef DYNAMIC_BUFFERS
3456static void dfx_rcv_flush( DFX_board_t *bp )
3457 {
3458 int i, j;
3459
3460 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3461 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3462 {
3463 struct sk_buff *skb;
3464 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3465 if (skb)
3466 dev_kfree_skb(skb);
3467 bp->p_rcv_buff_va[i+j] = NULL;
3468 }
3469
3470 }
3471#else
3472static inline void dfx_rcv_flush( DFX_board_t *bp )
3473{
3474}
3475#endif
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
/*
 * dfx_xmt_flush - discard all outstanding transmit frames
 *
 * Used on shutdown/reset paths: every frame between the completion and
 * producer indices is unmapped and freed (counted as a discard, not a
 * transmission), and the consumer block's transmit index is rewritten to
 * match the producer so the ring is seen as empty.
 */
static void dfx_xmt_flush( DFX_board_t *bp )
	{
	u32			prod_cons;	/* rebuilt consumer block word */
	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* driver-side descriptor */
	u8			comp;		/* ring index being flushed */

	/* Free every frame still sitting in the transmit ring. */

	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
		{
		/* Get the driver descriptor for this ring entry. */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Release the DMA mapping and free the skb. */
		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(p_xmt_drv_descr->p_skb);

		/* Flushed frames count as discards, not transmissions. */

		bp->xmt_discards++;

		/* Retire this descriptor. */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
		}

	/* Overwrite the transmit index in the consumer block so the
	 * adapter and driver agree the ring is empty. */

	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
	bp->cons_block_virt->xmt_rcv_data = prod_cons;
	}
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
/*
 * dfx_unregister - common tear-down for a board on any bus (PCI/EISA/TC).
 *
 * Reverses the probe-time setup: unregister the net device, free the
 * single coherent descriptor/consumer-block allocation, shut the
 * bus-specific hardware down, release the claimed MMIO or I/O-port
 * region and, for PCI, disable the device before freeing the
 * net_device itself.
 */
static void __devexit dfx_unregister(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	DFX_board_t *bp = netdev_priv(dev);
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;	/* TC is always MMIO */
	resource_size_t bar_start = 0;		/* start of the claimed region */
	resource_size_t bar_len = 0;		/* length of the claimed region */
	int alloc_size;				/* total coherent buffer size */

	unregister_netdev(dev);

	/* Recompute the size of the coherent allocation made at probe time;
	 * this expression (including the DYNAMIC_BUFFERS variation) must
	 * stay in sync with the allocating site. */
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

	dfx_bus_uninit(dev);

	/* Release whichever address-space region was claimed at probe. */
	dfx_get_bars(bdev, &bar_start, &bar_len);
	if (dfx_use_mmio) {
		iounmap(bp->base.mem);
		release_mem_region(bar_start, bar_len);
	} else
		release_region(bar_start, bar_len);

	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

	free_netdev(dev);
}
3624
3625
/*
 * Bus-generic probe/remove hooks shared by the EISA and TC drivers
 * below; __maybe_unused because either bus type may be configured out.
 */
static int __devinit __maybe_unused dfx_dev_register(struct device *);
static int __devexit __maybe_unused dfx_dev_unregister(struct device *);
3628
#ifdef CONFIG_PCI
static int __devinit dfx_pci_register(struct pci_dev *,
				      const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);

/* PCI IDs handled by this driver: the DEC DEFPA FDDI adapter.
 * Const: device-ID tables are read-only data and pci_driver.id_table
 * takes a pointer to const. */
static const struct pci_device_id dfx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
	.name		= "defxx",
	.id_table	= dfx_pci_table,
	.probe		= dfx_pci_register,
	.remove		= __devexit_p(dfx_pci_unregister),
};

/* Thin PCI glue: hand the generic device off to the bus-independent
 * registration path. */
static __devinit int dfx_pci_register(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	return dfx_register(&pdev->dev);
}

static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
{
	dfx_unregister(&pdev->dev);
}
#endif
3658
#ifdef CONFIG_EISA
/* EISA IDs of the DEFEA adapter variants handled by this driver. */
static struct eisa_device_id dfx_eisa_table[] = {
	{ "DEC3001", DEFEA_PROD_ID_1 },
	{ "DEC3002", DEFEA_PROD_ID_2 },
	{ "DEC3003", DEFEA_PROD_ID_3 },
	{ "DEC3004", DEFEA_PROD_ID_4 },
	{ }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

/* EISA bus glue: probe/remove via the bus-generic dfx_dev_* helpers. */
static struct eisa_driver dfx_eisa_driver = {
	.id_table	= dfx_eisa_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &eisa_bus_type,
		.probe	= dfx_dev_register,
		.remove	= __devexit_p(dfx_dev_unregister),
	},
};
#endif
3679
#ifdef CONFIG_TC
/* TURBOchannel IDs of the DEFTA adapter variants handled by this
 * driver (vendor/name strings are space-padded per the TC spec). */
static struct tc_device_id const dfx_tc_table[] = {
	{ "DEC     ", "PMAF-FA " },
	{ "DEC     ", "PMAF-FD " },
	{ "DEC     ", "PMAF-FS " },
	{ "DEC     ", "PMAF-FU " },
	{ }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

/* TURBOchannel bus glue: probe/remove via the bus-generic dfx_dev_*
 * helpers. */
static struct tc_driver dfx_tc_driver = {
	.id_table	= dfx_tc_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &tc_bus_type,
		.probe	= dfx_dev_register,
		.remove	= __devexit_p(dfx_dev_unregister),
	},
};
#endif
3700
3701static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
3702{
3703 int status;
3704
3705 status = dfx_register(dev);
3706 if (!status)
3707 get_device(dev);
3708 return status;
3709}
3710
/*
 * dfx_dev_unregister - generic device remove used by the EISA and
 * TURBOchannel drivers.
 *
 * Drops the reference taken in dfx_dev_register() and tears the
 * interface down.  NOTE(review): put_device() precedes the last use of
 * dev in dfx_unregister(); this is safe only if the bus core holds its
 * own reference for the duration of ->remove -- confirm.
 */
static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
{
	put_device(dev);
	dfx_unregister(dev);
	return 0;
}
3717
3718
3719static int __devinit dfx_init(void)
3720{
3721 int status;
3722
3723 status = pci_register_driver(&dfx_pci_driver);
3724 if (!status)
3725 status = eisa_driver_register(&dfx_eisa_driver);
3726 if (!status)
3727 status = tc_register_driver(&dfx_tc_driver);
3728 return status;
3729}
3730
/*
 * dfx_cleanup - module exit point: unregister from all bus types in
 * the reverse of the order dfx_init() registered them.
 */
static void __devexit dfx_cleanup(void)
{
	tc_unregister_driver(&dfx_tc_driver);
	eisa_driver_unregister(&dfx_eisa_driver);
	pci_unregister_driver(&dfx_pci_driver);
}
3737
/* Module plumbing and metadata. */
module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
		   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");
3744