/*
 * defxx.c: A Linux device driver for the Digital Equipment Corporation
 * FDDI controller family - the DEFTA (TURBOchannel), DEFEA (EISA) and
 * DEFPA (PCI) adapters.
 *
 * Originally written by Lawrence V. Stefani, with later changes by others
 * (see the version string below).
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>

#include <asm/byteorder.h>
#include <asm/io.h>

#include "defxx.h"

#define DRV_NAME "defxx"
#define DRV_VERSION "v1.11"
#define DRV_RELDATE "2014/07/01"

static char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	" Lawrence V. Stefani and others\n";

#define DYNAMIC_BUFFERS 1

#define SKBUFF_RX_COPYBREAK 200

#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif
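
/* Forward declarations of driver entry points and internal helpers. */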
264static void dfx_bus_init(struct net_device *dev);
265static void dfx_bus_uninit(struct net_device *dev);
266static void dfx_bus_config_check(DFX_board_t *bp);
267
268static int dfx_driver_init(struct net_device *dev,
269 const char *print_name,
270 resource_size_t bar_start);
271static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
272
273static int dfx_open(struct net_device *dev);
274static int dfx_close(struct net_device *dev);
275
276static void dfx_int_pr_halt_id(DFX_board_t *bp);
277static void dfx_int_type_0_process(DFX_board_t *bp);
278static void dfx_int_common(struct net_device *dev);
279static irqreturn_t dfx_interrupt(int irq, void *dev_id);
280
281static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
282static void dfx_ctl_set_multicast_list(struct net_device *dev);
283static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
284static int dfx_ctl_update_cam(DFX_board_t *bp);
285static int dfx_ctl_update_filters(DFX_board_t *bp);
286
287static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
288static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
289static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
290static int dfx_hw_adap_state_rd(DFX_board_t *bp);
291static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
292
293static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294static void dfx_rcv_queue_process(DFX_board_t *bp);
295#ifdef DYNAMIC_BUFFERS
296static void dfx_rcv_flush(DFX_board_t *bp);
297#else
298static inline void dfx_rcv_flush(DFX_board_t *bp) {}
299#endif
300
301static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
302 struct net_device *dev);
303static int dfx_xmt_done(DFX_board_t *bp);
304static void dfx_xmt_flush(DFX_board_t *bp);
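
/* Bus driver structures (forward declarations). */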
308static struct pci_driver dfx_pci_driver;
309static struct eisa_driver dfx_eisa_driver;
310static struct tc_driver dfx_tc_driver;
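
/*
 * Low-level CSR access helpers.  dfx_port_write_long() and
 * dfx_port_read_long() dispatch to memory-mapped (writel/readl) or port I/O
 * (outl/inl) accessors, depending on how the board is mapped.
 */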
363static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
364{
365 writel(data, bp->base.mem + offset);
366 mb();
367}
368
369static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
370{
371 outl(data, bp->base.port + offset);
372}
373
374static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
375{
376 struct device __maybe_unused *bdev = bp->bus_dev;
377 int dfx_bus_tc = DFX_BUS_TC(bdev);
378 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
379
380 if (dfx_use_mmio)
381 dfx_writel(bp, offset, data);
382 else
383 dfx_outl(bp, offset, data);
384}
385
386
387static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
388{
389 mb();
390 *data = readl(bp->base.mem + offset);
391}
392
393static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
394{
395 *data = inl(bp->base.port + offset);
396}
397
398static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
399{
400 struct device __maybe_unused *bdev = bp->bus_dev;
401 int dfx_bus_tc = DFX_BUS_TC(bdev);
402 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
403
404 if (dfx_use_mmio)
405 dfx_readl(bp, offset, data);
406 else
407 dfx_inl(bp, offset, data);
408}
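
/*
 * dfx_get_bars - report the base address and length of up to three register
 * resources ("BARs") used by the adapter, depending on its bus type (PCI,
 * EISA or TURBOchannel) and on whether MMIO or port I/O is selected.
 */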
434static void dfx_get_bars(struct device *bdev,
435 resource_size_t *bar_start, resource_size_t *bar_len)
436{
437 int dfx_bus_pci = dev_is_pci(bdev);
438 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
439 int dfx_bus_tc = DFX_BUS_TC(bdev);
440 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
441
442 if (dfx_bus_pci) {
443 int num = dfx_use_mmio ? 0 : 1;
444
445 bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
446 bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
447 bar_start[2] = bar_start[1] = 0;
448 bar_len[2] = bar_len[1] = 0;
449 }
450 if (dfx_bus_eisa) {
451 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
452 resource_size_t bar_lo;
453 resource_size_t bar_hi;
454
455 if (dfx_use_mmio) {
456 bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
457 bar_lo <<= 8;
458 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
459 bar_lo <<= 8;
460 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
461 bar_lo <<= 8;
462 bar_start[0] = bar_lo;
463 bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
464 bar_hi <<= 8;
465 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
466 bar_hi <<= 8;
467 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
468 bar_hi <<= 8;
469 bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
470 1;
471 } else {
472 bar_start[0] = base_addr;
473 bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
474 }
475 bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
476 bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
477 bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
478 bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
479 }
480 if (dfx_bus_tc) {
481 bar_start[0] = to_tc_dev(bdev)->resource.start +
482 PI_TC_K_CSR_OFFSET;
483 bar_len[0] = PI_TC_K_CSR_LEN;
484 bar_start[2] = bar_start[1] = 0;
485 bar_len[2] = bar_len[1] = 0;
486 }
487}
488
static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};
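
/*
 * dfx_register - one-time setup for a newly probed adapter: allocate the
 * FDDI net_device, reserve and map its register resources, run the board
 * and driver initialisation, and register the network interface.
 */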
526static int dfx_register(struct device *bdev)
527{
528 static int version_disp;
529 int dfx_bus_pci = dev_is_pci(bdev);
530 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
531 int dfx_bus_tc = DFX_BUS_TC(bdev);
532 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
533 const char *print_name = dev_name(bdev);
534 struct net_device *dev;
535 DFX_board_t *bp;
536 resource_size_t bar_start[3] = {0};
537 resource_size_t bar_len[3] = {0};
538 int alloc_size;
539 struct resource *region;
540 int err = 0;
541
542 if (!version_disp) {
543 version_disp = 1;
544 printk(version);
545 }
546
547 dev = alloc_fddidev(sizeof(*bp));
548 if (!dev) {
549 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
550 print_name);
551 return -ENOMEM;
552 }
553
554
555 if (dfx_bus_pci) {
556 err = pci_enable_device(to_pci_dev(bdev));
557 if (err) {
558 pr_err("%s: Cannot enable PCI device, aborting\n",
559 print_name);
560 goto err_out;
561 }
562 }
563
564 SET_NETDEV_DEV(dev, bdev);
565
566 bp = netdev_priv(dev);
567 bp->bus_dev = bdev;
568 dev_set_drvdata(bdev, dev);
569
570 dfx_get_bars(bdev, bar_start, bar_len);
571 if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
572 pr_err("%s: Cannot use MMIO, no address set, aborting\n",
573 print_name);
574 pr_err("%s: Run ECU and set adapter's MMIO location\n",
575 print_name);
576 pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
577 "\n", print_name);
578 err = -ENXIO;
579 goto err_out;
580 }
581
582 if (dfx_use_mmio)
583 region = request_mem_region(bar_start[0], bar_len[0],
584 print_name);
585 else
586 region = request_region(bar_start[0], bar_len[0], print_name);
587 if (!region) {
		pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
		       "aborting\n", print_name, dfx_use_mmio ? "MMIO" : "I/O",
		       (long)bar_len[0], (long)bar_start[0]);
591 err = -EBUSY;
592 goto err_out_disable;
593 }
594 if (bar_start[1] != 0) {
595 region = request_region(bar_start[1], bar_len[1], print_name);
596 if (!region) {
597 pr_err("%s: Cannot reserve I/O resource "
598 "0x%lx @ 0x%lx, aborting\n", print_name,
599 (long)bar_len[1], (long)bar_start[1]);
600 err = -EBUSY;
601 goto err_out_csr_region;
602 }
603 }
604 if (bar_start[2] != 0) {
605 region = request_region(bar_start[2], bar_len[2], print_name);
606 if (!region) {
607 pr_err("%s: Cannot reserve I/O resource "
608 "0x%lx @ 0x%lx, aborting\n", print_name,
609 (long)bar_len[2], (long)bar_start[2]);
610 err = -EBUSY;
611 goto err_out_bh_region;
612 }
613 }
614
615
616 if (dfx_use_mmio) {
617 bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
618 if (!bp->base.mem) {
619 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
620 err = -ENOMEM;
621 goto err_out_esic_region;
622 }
623 } else {
624 bp->base.port = bar_start[0];
625 dev->base_addr = bar_start[0];
626 }
627
628
629 dev->netdev_ops = &dfx_netdev_ops;
630
631 if (dfx_bus_pci)
632 pci_set_master(to_pci_dev(bdev));
633
634 if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
635 err = -ENODEV;
636 goto err_out_unmap;
637 }
638
639 err = register_netdev(dev);
640 if (err)
641 goto err_out_kfree;
642
643 printk("%s: registered as %s\n", print_name, dev->name);
644 return 0;
645
646err_out_kfree:
647 alloc_size = sizeof(PI_DESCR_BLOCK) +
648 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
649#ifndef DYNAMIC_BUFFERS
650 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
651#endif
652 sizeof(PI_CONSUMER_BLOCK) +
653 (PI_ALIGN_K_DESC_BLK - 1);
654 if (bp->kmalloced)
655 dma_free_coherent(bdev, alloc_size,
656 bp->kmalloced, bp->kmalloced_dma);
657
658err_out_unmap:
659 if (dfx_use_mmio)
660 iounmap(bp->base.mem);
661
662err_out_esic_region:
663 if (bar_start[2] != 0)
664 release_region(bar_start[2], bar_len[2]);
665
666err_out_bh_region:
667 if (bar_start[1] != 0)
668 release_region(bar_start[1], bar_len[1]);
669
670err_out_csr_region:
671 if (dfx_use_mmio)
672 release_mem_region(bar_start[0], bar_len[0]);
673 else
674 release_region(bar_start[0], bar_len[0]);
675
676err_out_disable:
677 if (dfx_bus_pci)
678 pci_disable_device(to_pci_dev(bdev));
679
680err_out:
681 free_netdev(dev);
682 return err;
683}
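
/*
 * dfx_bus_init - bus-specific initialisation: determine the IRQ and enable
 * the adapter on its host bus (ESIC slot and I/O decode setup on EISA;
 * latency timer check and PFI interrupt/DMA enable on PCI).
 */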
717static void dfx_bus_init(struct net_device *dev)
718{
719 DFX_board_t *bp = netdev_priv(dev);
720 struct device *bdev = bp->bus_dev;
721 int dfx_bus_pci = dev_is_pci(bdev);
722 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
723 int dfx_bus_tc = DFX_BUS_TC(bdev);
724 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
725 u8 val;
726
727 DBG_printk("In dfx_bus_init...\n");
728
729
730 bp->dev = dev;
731
732
733
734 if (dfx_bus_tc)
735 dev->irq = to_tc_dev(bdev)->interrupt;
736 if (dfx_bus_eisa) {
737 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
738
739
740 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
741
742
743 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
744 val &= PI_CONFIG_STAT_0_M_IRQ;
745 val >>= PI_CONFIG_STAT_0_V_IRQ;
746
747 switch (val) {
748 case PI_CONFIG_STAT_0_IRQ_K_9:
749 dev->irq = 9;
750 break;
751
752 case PI_CONFIG_STAT_0_IRQ_K_10:
753 dev->irq = 10;
754 break;
755
756 case PI_CONFIG_STAT_0_IRQ_K_11:
757 dev->irq = 11;
758 break;
759
760 case PI_CONFIG_STAT_0_IRQ_K_15:
761 dev->irq = 15;
762 break;
763 }
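
		/*
		 * Program the ESIC I/O address decode: compare/mask pair 0
		 * covers the PDQ CSR space and pair 1 the burst holdoff
		 * register, both within this slot's I/O range.
		 */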
777 val = 0;
778 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
779 val = PI_DEFEA_K_CSR_IO;
780 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
781
782 val = PI_IO_CMP_M_SLOT;
783 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
784 val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
785 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
786
787 val = 0;
788 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
789 val = PI_DEFEA_K_BURST_HOLDOFF;
790 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
791
792 val = PI_IO_CMP_M_SLOT;
793 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
794 val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
795 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
796
797
798 val = PI_FUNCTION_CNTRL_M_IOCS1;
799 if (dfx_use_mmio)
800 val |= PI_FUNCTION_CNTRL_M_MEMCS1;
801 else
802 val |= PI_FUNCTION_CNTRL_M_IOCS0;
803 outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
804
805
806
807
808
809 val = PI_SLOT_CNTRL_M_ENB;
810 outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
811
812
813
814
815
816 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
817 if (dfx_use_mmio)
818 val |= PI_BURST_HOLDOFF_M_MEM_MAP;
819 else
820 val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
821 outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
822
823
824 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
825 val |= PI_CONFIG_STAT_0_M_INT_ENB;
826 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
827 }
828 if (dfx_bus_pci) {
829 struct pci_dev *pdev = to_pci_dev(bdev);
830
831
832
833 dev->irq = pdev->irq;
834
835
836
837 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
838 if (val < PFI_K_LAT_TIMER_MIN) {
839 val = PFI_K_LAT_TIMER_DEF;
840 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
841 }
842
843
844 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
845 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
846 }
847}
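
/*
 * dfx_bus_uninit - undo dfx_bus_init: disable board interrupts and the
 * adapter's bus interface (ESIC slot/function control on EISA, PFI mode
 * register on PCI).
 */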
877static void dfx_bus_uninit(struct net_device *dev)
878{
879 DFX_board_t *bp = netdev_priv(dev);
880 struct device *bdev = bp->bus_dev;
881 int dfx_bus_pci = dev_is_pci(bdev);
882 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
883 u8 val;
884
885 DBG_printk("In dfx_bus_uninit...\n");
886
887
888
889 if (dfx_bus_eisa) {
890 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
891
892
893 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
894 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
895 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
896
897
898 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
899
900
901 outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
902 }
903 if (dfx_bus_pci) {
904
905 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
906 }
907}
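
/*
 * dfx_bus_config_check - adjust settings that depend on the exact board
 * revision.  DEFEA boards with an older PDQ revision cannot handle the
 * larger DMA burst sizes or full-duplex operation, so both are scaled back.
 */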
940static void dfx_bus_config_check(DFX_board_t *bp)
941{
942 struct device __maybe_unused *bdev = bp->bus_dev;
943 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
944 int status;
945 u32 host_data;
946
947 DBG_printk("In dfx_bus_config_check...\n");
948
949
950
951 if (dfx_bus_eisa) {
952
953
954
955
956
957
958
959 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
960
961
962
963
964 status = dfx_hw_port_ctrl_req(bp,
965 PI_PCTRL_M_SUB_CMD,
966 PI_SUB_CMD_K_PDQ_REV_GET,
967 0,
968 &host_data);
969 if ((status != DFX_K_SUCCESS) || (host_data == 2))
970 {
971
972
973
974
975
976
977
978
979 switch (bp->burst_size)
980 {
981 case PI_PDATA_B_DMA_BURST_SIZE_32:
982 case PI_PDATA_B_DMA_BURST_SIZE_16:
983 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
984 break;
985
986 default:
987 break;
988 }
989
990
991
992 bp->full_duplex_enb = PI_SNMP_K_FALSE;
993 }
994 }
995 }
996 }
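
/*
 * dfx_driver_init - board-independent initialisation: set operating
 * defaults, read the factory MAC address from the adapter, and allocate and
 * carve up the DMA-coherent block that holds the descriptor block, command
 * request/response buffers and consumer block.
 */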
1036static int dfx_driver_init(struct net_device *dev, const char *print_name,
1037 resource_size_t bar_start)
1038{
1039 DFX_board_t *bp = netdev_priv(dev);
1040 struct device *bdev = bp->bus_dev;
1041 int dfx_bus_pci = dev_is_pci(bdev);
1042 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1043 int dfx_bus_tc = DFX_BUS_TC(bdev);
1044 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
1045 int alloc_size;
1046 char *top_v, *curr_v;
1047 dma_addr_t top_p, curr_p;
1048 u32 data;
1049 __le32 le32;
1050 char *board_name = NULL;
1051
1052 DBG_printk("In dfx_driver_init...\n");
1053
1054
1055
1056 dfx_bus_init(dev);
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067 bp->full_duplex_enb = PI_SNMP_K_FALSE;
1068 bp->req_ttrt = 8 * 12500;
1069 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
1070 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081 dfx_bus_config_check(bp);
1082
1083
1084
1085 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1086
1087
1088
1089 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1090
1091
1092
1093 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1094 &data) != DFX_K_SUCCESS) {
1095 printk("%s: Could not read adapter factory MAC address!\n",
1096 print_name);
1097 return DFX_K_FAILURE;
1098 }
1099 le32 = cpu_to_le32(data);
1100 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1101
1102 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1103 &data) != DFX_K_SUCCESS) {
1104 printk("%s: Could not read adapter factory MAC address!\n",
1105 print_name);
1106 return DFX_K_FAILURE;
1107 }
1108 le32 = cpu_to_le32(data);
1109 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1110
1111
1112
1113
1114
1115
1116
1117
1118 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1119 if (dfx_bus_tc)
1120 board_name = "DEFTA";
1121 if (dfx_bus_eisa)
1122 board_name = "DEFEA";
1123 if (dfx_bus_pci)
1124 board_name = "DEFPA";
1125 pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1126 print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
1127 (long long)bar_start, dev->irq, dev->dev_addr);
1128
1129
1130
1131
1132
1133
1134 alloc_size = sizeof(PI_DESCR_BLOCK) +
1135 PI_CMD_REQ_K_SIZE_MAX +
1136 PI_CMD_RSP_K_SIZE_MAX +
1137#ifndef DYNAMIC_BUFFERS
1138 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1139#endif
1140 sizeof(PI_CONSUMER_BLOCK) +
1141 (PI_ALIGN_K_DESC_BLK - 1);
1142 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
1143 &bp->kmalloced_dma,
1144 GFP_ATOMIC);
1145 if (top_v == NULL)
1146 return DFX_K_FAILURE;
1147
1148 top_p = bp->kmalloced_dma;
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1163 curr_v = top_v + (curr_p - top_p);
1164
1165
1166
1167 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1168 bp->descr_block_phys = curr_p;
1169 curr_v += sizeof(PI_DESCR_BLOCK);
1170 curr_p += sizeof(PI_DESCR_BLOCK);
1171
1172
1173
1174 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1175 bp->cmd_req_phys = curr_p;
1176 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1177 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1178
1179
1180
1181 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1182 bp->cmd_rsp_phys = curr_p;
1183 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1184 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1185
1186
1187
1188 bp->rcv_block_virt = curr_v;
1189 bp->rcv_block_phys = curr_p;
1190
1191#ifndef DYNAMIC_BUFFERS
1192 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1193 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1194#endif
1195
1196
1197
1198 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1199 bp->cons_block_phys = curr_p;
1200
1201
1202
1203 DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1204 print_name, bp->descr_block_virt, &bp->descr_block_phys);
1205 DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1206 print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1207 DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1208 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1209 DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1210 print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1211 DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1212 print_name, bp->cons_block_virt, &bp->cons_block_phys);
1213
1214 return DFX_K_SUCCESS;
1215}
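
/*
 * dfx_adap_init - bring the adapter to an operational state: reset it, set
 * the DMA burst size, hand over the consumer and descriptor block addresses,
 * program characteristics and address filters, post receive buffers and
 * issue the start command before re-enabling interrupts.
 */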
1251static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1252 {
1253 DBG_printk("In dfx_adap_init...\n");
1254
1255
1256
1257 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1258
1259
1260
1261 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1262 {
1263 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1264 return DFX_K_FAILURE;
1265 }
1266
1267
1268
1269
1270
1271
1272 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1273
1274
1275
1276
1277
1278
1279
1280
1281 bp->cmd_req_reg.lword = 0;
1282 bp->cmd_rsp_reg.lword = 0;
1283 bp->rcv_xmt_reg.lword = 0;
1284
1285
1286
1287 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1288
1289
1290
1291 if (dfx_hw_port_ctrl_req(bp,
1292 PI_PCTRL_M_SUB_CMD,
1293 PI_SUB_CMD_K_BURST_SIZE_SET,
1294 bp->burst_size,
1295 NULL) != DFX_K_SUCCESS)
1296 {
1297 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1298 return DFX_K_FAILURE;
1299 }
1300
1301
1302
1303
1304
1305
1306
1307
1308 if (dfx_hw_port_ctrl_req(bp,
1309 PI_PCTRL_M_CONS_BLOCK,
1310 bp->cons_block_phys,
1311 0,
1312 NULL) != DFX_K_SUCCESS)
1313 {
1314 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1315 return DFX_K_FAILURE;
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1329 (u32)(bp->descr_block_phys |
1330 PI_PDATA_A_INIT_M_BSWAP_INIT),
1331 0, NULL) != DFX_K_SUCCESS) {
1332 printk("%s: Could not set descriptor block address!\n",
1333 bp->dev->name);
1334 return DFX_K_FAILURE;
1335 }
1336
1337
1338
1339 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1340 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1341 bp->cmd_req_virt->char_set.item[0].value = 3;
1342 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1343 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1344 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1345 {
1346 printk("%s: DMA command request failed!\n", bp->dev->name);
1347 return DFX_K_FAILURE;
1348 }
1349
1350
1351
1352 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1353 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1354 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1355 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1356 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1357 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1358 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1359 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1360 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1361 {
1362 printk("%s: DMA command request failed!\n", bp->dev->name);
1363 return DFX_K_FAILURE;
1364 }
1365
1366
1367
1368 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1369 {
1370 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1371 return DFX_K_FAILURE;
1372 }
1373
1374
1375
1376 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1377 {
1378 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1379 return DFX_K_FAILURE;
1380 }
1381
1382
1383
1384
1385
1386
1387 if (get_buffers)
1388 dfx_rcv_flush(bp);
1389
1390
1391
1392 if (dfx_rcv_init(bp, get_buffers))
1393 {
1394 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1395 if (get_buffers)
1396 dfx_rcv_flush(bp);
1397 return DFX_K_FAILURE;
1398 }
1399
1400
1401
1402 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1403 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1404 {
1405 printk("%s: Start command failed\n", bp->dev->name);
1406 if (get_buffers)
1407 dfx_rcv_flush(bp);
1408 return DFX_K_FAILURE;
1409 }
1410
1411
1412
1413 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1414 return DFX_K_SUCCESS;
1415 }
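
/*
 * dfx_open - net_device open handler: request the IRQ, restore the factory
 * MAC address and clear the address filters, initialise the adapter and
 * start the transmit queue.
 */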
1448static int dfx_open(struct net_device *dev)
1449{
1450 DFX_board_t *bp = netdev_priv(dev);
1451 int ret;
1452
1453 DBG_printk("In dfx_open...\n");
1454
1455
1456
1457 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1458 dev);
1459 if (ret) {
1460 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1461 return ret;
1462 }
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1476
1477
1478
1479 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1480 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1481 bp->uc_count = 0;
1482 bp->mc_count = 0;
1483
1484
1485
1486 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1487 bp->group_prom = PI_FSTATE_K_BLOCK;
1488
1489 spin_lock_init(&bp->lock);
1490
1491
1492
1493 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;
1494 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1495 {
1496 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1497 free_irq(dev->irq, dev);
1498 return -EAGAIN;
1499 }
1500
1501
1502 netif_start_queue(dev);
1503 return 0;
1504}
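
/*
 * dfx_close - net_device stop handler: disable interrupts, reset the
 * adapter, flush outstanding transmit and receive buffers, and release the
 * IRQ.
 */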
1539static int dfx_close(struct net_device *dev)
1540{
1541 DFX_board_t *bp = netdev_priv(dev);
1542
1543 DBG_printk("In dfx_close...\n");
1544
1545
1546
1547 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1548
1549
1550
1551 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 dfx_xmt_flush(bp);
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575 bp->cmd_req_reg.lword = 0;
1576 bp->cmd_rsp_reg.lword = 0;
1577 bp->rcv_xmt_reg.lword = 0;
1578
1579
1580
1581 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1582
1583
1584
1585 dfx_rcv_flush(bp);
1586
1587
1588
1589 netif_stop_queue(dev);
1590
1591
1592
1593 free_irq(dev->irq, dev);
1594
1595 return 0;
1596}
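
/*
 * dfx_int_pr_halt_id - decode the halt ID field of the port status register
 * and print the reason the adapter halted.
 */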
1626static void dfx_int_pr_halt_id(DFX_board_t *bp)
1627 {
1628 PI_UINT32 port_status;
1629 PI_UINT32 halt_id;
1630
1631
1632
1633 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1634
1635
1636
1637 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1638 switch (halt_id)
1639 {
1640 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1641 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1642 break;
1643
1644 case PI_HALT_ID_K_PARITY_ERROR:
1645 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1646 break;
1647
1648 case PI_HALT_ID_K_HOST_DIR_HALT:
1649 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1650 break;
1651
1652 case PI_HALT_ID_K_SW_FAULT:
1653 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1654 break;
1655
1656 case PI_HALT_ID_K_HW_FAULT:
1657 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1658 break;
1659
1660 case PI_HALT_ID_K_PC_TRACE:
1661 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1662 break;
1663
1664 case PI_HALT_ID_K_DMA_ERROR:
1665 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1666 break;
1667
1668 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1669 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1670 break;
1671
1672 case PI_HALT_ID_K_BUS_EXCEPTION:
1673 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1674 break;
1675
1676 default:
1677 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1678 break;
1679 }
1680 }
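
/*
 * dfx_int_type_0_process - handle Type 0 (state change and error)
 * interrupts: fatal bus errors and a HALTED adapter trigger a reset,
 * transmit flush requests are acknowledged, and link availability is
 * tracked.
 */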
1730static void dfx_int_type_0_process(DFX_board_t *bp)
1731
1732 {
1733 PI_UINT32 type_0_status;
1734 PI_UINT32 state;
1735
1736
1737
1738
1739
1740
1741
1742 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1743 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1744
1745
1746
1747 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1748 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1749 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1750 {
1751
1752
1753 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1754 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1755
1756
1757
1758 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1759 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1760
1761
1762
1763 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1764 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1765
1766
1767
1768 bp->link_available = PI_K_FALSE;
1769 bp->reset_type = 0;
1770 printk("%s: Resetting adapter...\n", bp->dev->name);
1771 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1772 {
1773 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1774 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1775 return;
1776 }
1777 printk("%s: Adapter reset successful!\n", bp->dev->name);
1778 return;
1779 }
1780
1781
1782
1783 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1784 {
1785
1786
1787 bp->link_available = PI_K_FALSE;
1788 dfx_xmt_flush(bp);
1789 (void) dfx_hw_port_ctrl_req(bp,
1790 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1791 0,
1792 0,
1793 NULL);
1794 }
1795
1796
1797
1798 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1799 {
1800
1801
1802 state = dfx_hw_adap_state_rd(bp);
1803 if (state == PI_STATE_K_HALTED)
1804 {
1805
1806
1807
1808
1809
1810
1811 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1812 dfx_int_pr_halt_id(bp);
1813
1814
1815
1816 bp->link_available = PI_K_FALSE;
1817 bp->reset_type = 0;
1818 printk("%s: Resetting adapter...\n", bp->dev->name);
1819 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1820 {
1821 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1822 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1823 return;
1824 }
1825 printk("%s: Adapter reset successful!\n", bp->dev->name);
1826 }
1827 else if (state == PI_STATE_K_LINK_AVAIL)
1828 {
1829 bp->link_available = PI_K_TRUE;
1830 }
1831 }
1832 }
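
/*
 * dfx_int_common - interrupt work shared by all bus types: reap completed
 * transmits, process received frames, update the Type 2 producer register
 * and dispatch any pending Type 0 interrupts.
 */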
1875static void dfx_int_common(struct net_device *dev)
1876{
1877 DFX_board_t *bp = netdev_priv(dev);
1878 PI_UINT32 port_status;
1879
1880
1881
1882 if(dfx_xmt_done(bp))
1883 netif_wake_queue(dev);
1884
1885
1886
1887 dfx_rcv_queue_process(bp);
1888
1889
1890
1891
1892
1893
1894
1895
1896 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1897
1898
1899
1900 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1901
1902
1903
1904 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1905 dfx_int_type_0_process(bp);
1906 }
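
/*
 * dfx_interrupt - interrupt handler.  Verifies that the adapter really has
 * an interrupt pending (the line may be shared), masks and later re-enables
 * board interrupts where the bus requires it (PCI and EISA), and runs
 * dfx_int_common() under the board lock.
 */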
1945static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1946{
1947 struct net_device *dev = dev_id;
1948 DFX_board_t *bp = netdev_priv(dev);
1949 struct device *bdev = bp->bus_dev;
1950 int dfx_bus_pci = dev_is_pci(bdev);
1951 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1952 int dfx_bus_tc = DFX_BUS_TC(bdev);
1953
1954
1955
1956 if (dfx_bus_pci) {
1957 u32 status;
1958
1959 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1960 if (!(status & PFI_STATUS_M_PDQ_INT))
1961 return IRQ_NONE;
1962
1963 spin_lock(&bp->lock);
1964
1965
1966 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1967 PFI_MODE_M_DMA_ENB);
1968
1969
1970 dfx_int_common(dev);
1971
1972
1973 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1974 PFI_STATUS_M_PDQ_INT);
1975 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1976 (PFI_MODE_M_PDQ_INT_ENB |
1977 PFI_MODE_M_DMA_ENB));
1978
1979 spin_unlock(&bp->lock);
1980 }
1981 if (dfx_bus_eisa) {
1982 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1983 u8 status;
1984
1985 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1986 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1987 return IRQ_NONE;
1988
1989 spin_lock(&bp->lock);
1990
1991
1992 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1993 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1994
1995
1996 dfx_int_common(dev);
1997
1998
1999 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2000 status |= PI_CONFIG_STAT_0_M_INT_ENB;
2001 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2002
2003 spin_unlock(&bp->lock);
2004 }
2005 if (dfx_bus_tc) {
2006 u32 status;
2007
2008 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
2009 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
2010 PI_PSTATUS_M_XMT_DATA_PENDING |
2011 PI_PSTATUS_M_SMT_HOST_PENDING |
2012 PI_PSTATUS_M_UNSOL_PENDING |
2013 PI_PSTATUS_M_CMD_RSP_PENDING |
2014 PI_PSTATUS_M_CMD_REQ_PENDING |
2015 PI_PSTATUS_M_TYPE_0_PENDING)))
2016 return IRQ_NONE;
2017
2018 spin_lock(&bp->lock);
2019
2020
2021 dfx_int_common(dev);
2022
2023 spin_unlock(&bp->lock);
2024 }
2025
2026 return IRQ_HANDLED;
2027}
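
/*
 * dfx_ctl_get_stats - fill in the generic network statistics from driver
 * counters, then issue SMT MIB and counter GET commands to the adapter to
 * refresh the FDDI-specific statistics.
 */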
2073static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2074 {
2075 DFX_board_t *bp = netdev_priv(dev);
2076
2077
2078
2079 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2080 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2081 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2082 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2083 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2084 bp->rcv_frame_status_errors +
2085 bp->rcv_length_errors;
2086 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2087 bp->stats.gen.rx_dropped = bp->rcv_discards;
2088 bp->stats.gen.tx_dropped = bp->xmt_discards;
2089 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2090 bp->stats.gen.collisions = 0;
2091
2092
2093
2094 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2095 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2096 return (struct net_device_stats *)&bp->stats;
2097
2098
2099
2100 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2101 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2102 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2103 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2104 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2105 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2106 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2107 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2108 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2109 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2110 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2111 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2112 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2113 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2114 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2115 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2116 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2117 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2118 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2119 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2120 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2121 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2122 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2123 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2124 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2125 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2126 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2127 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2128 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2129 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2130 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2131 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2132 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2133 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2134 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2135 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2136 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2137 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2138 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2139 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2140 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2141 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2142 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2143 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2144 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2145 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2146 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2147 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2148 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2149 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2150 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2151 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2152 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2153 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2154 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2155 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2156 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2157 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2158 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2159 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2160 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2161 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2162 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2163 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2164 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2165 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2166 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2167 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2168 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2169 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2170 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2171 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2172 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2173 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2174 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2175 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2176 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2177 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2178 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2179 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2180 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2181 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2182 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2183 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2184 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2185 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2186 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2187 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2188 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2189 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2190 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2191 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2192
2193
2194
2195 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2196 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2197 return (struct net_device_stats *)&bp->stats;
2198
2199
2200
2201 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2202 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2203 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2204 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2205 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2206 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2207 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2208 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2209 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2210 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2211 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2212
2213 return (struct net_device_stats *)&bp->stats;
2214 }
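
/*
 * dfx_ctl_set_multicast_list - update the receive filters: enable
 * promiscuous mode if requested, fall back to passing all group addresses
 * when the multicast list does not fit in the CAM, and otherwise load the
 * list into the CAM.
 */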
2260static void dfx_ctl_set_multicast_list(struct net_device *dev)
2261{
2262 DFX_board_t *bp = netdev_priv(dev);
2263 int i;
2264 struct netdev_hw_addr *ha;
2265
2266
2267
2268 if (dev->flags & IFF_PROMISC)
2269 bp->ind_group_prom = PI_FSTATE_K_PASS;
2270
2271
2272
2273 else
2274 {
2275 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2297 {
2298 bp->group_prom = PI_FSTATE_K_PASS;
2299 bp->mc_count = 0;
2300 }
2301 else
2302 {
2303 bp->group_prom = PI_FSTATE_K_BLOCK;
2304 bp->mc_count = netdev_mc_count(dev);
2305 }
2306
2307
2308
2309 i = 0;
2310 netdev_for_each_mc_addr(ha, dev)
2311 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2312 ha->addr, FDDI_K_ALEN);
2313
2314 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2315 {
2316 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2317 }
2318 else
2319 {
2320 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2321 }
2322 }
2323
2324
2325
2326 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2327 {
2328 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2329 }
2330 else
2331 {
2332 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2333 }
2334 }
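
/*
 * dfx_ctl_set_mac_address - change the interface MAC address: install the
 * new address as the sole unicast CAM entry and widen the group filter if
 * the CAM no longer has room for the multicast list.
 */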
2373static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2374 {
2375 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2376 DFX_board_t *bp = netdev_priv(dev);
2377
2378
2379
2380 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
2381 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);
2382 bp->uc_count = 1;
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2397 {
2398 bp->group_prom = PI_FSTATE_K_PASS;
2399 bp->mc_count = 0;
2400
2401
2402
2403 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2404 {
2405 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2406 }
2407 else
2408 {
2409 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2410 }
2411 }
2412
2413
2414
2415 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2416 {
2417 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2418 }
2419 else
2420 {
2421 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2422 }
2423 return 0;
2424 }
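
/*
 * dfx_ctl_update_cam - rewrite the adapter's CAM (address filter table) from
 * the current unicast and multicast address tables.
 */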
2460static int dfx_ctl_update_cam(DFX_board_t *bp)
2461 {
2462 int i;
2463 PI_LAN_ADDR *p_addr;
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);
2479 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2480 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2481
2482
2483
2484 for (i=0; i < (int)bp->uc_count; i++)
2485 {
2486 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2487 {
2488 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2489 p_addr++;
2490 }
2491 }
2492
2493
2494
2495 for (i=0; i < (int)bp->mc_count; i++)
2496 {
2497 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2498 {
2499 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2500 p_addr++;
2501 }
2502 }
2503
2504
2505
2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2507 return DFX_K_FAILURE;
2508 return DFX_K_SUCCESS;
2509 }
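
/*
 * dfx_ctl_update_filters - push the broadcast, promiscuous and all-multicast
 * filter states to the adapter with a FILTERS_SET command.
 */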
2543static int dfx_ctl_update_filters(DFX_board_t *bp)
2544 {
2545 int i = 0;
2546
2547
2548
2549 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2550
2551
2552
2553 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2554 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2555
2556
2557
2558 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2559 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2560
2561
2562
2563 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2564 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2565
2566
2567
2568 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2569
2570
2571
2572 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2573 return DFX_K_FAILURE;
2574 return DFX_K_SUCCESS;
2575 }
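
/*
 * dfx_hw_dma_cmd_req - issue the command prepared in the command request
 * buffer to the adapter over DMA and poll until both the request and the
 * response have been consumed.
 */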
2618static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2619 {
2620 int status;
2621 int timeout_cnt;
2622
2623
2624
2625 status = dfx_hw_adap_state_rd(bp);
2626 if ((status == PI_STATE_K_RESET) ||
2627 (status == PI_STATE_K_HALTED) ||
2628 (status == PI_STATE_K_DMA_UNAVAIL) ||
2629 (status == PI_STATE_K_UPGRADE))
2630 return DFX_K_OUTSTATE;
2631
2632
2633
2634 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2635 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2636 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2637
2638
2639
2640 bp->cmd_rsp_reg.index.prod += 1;
2641 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2642 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2643
2644
2645
2646 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2647 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2648 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2649
2650
2651
2652 bp->cmd_req_reg.index.prod += 1;
2653 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2654 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2655
2656
2657
2658
2659
2660
2661 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2662 {
2663 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2664 break;
2665 udelay(100);
2666 }
2667 if (timeout_cnt == 0)
2668 return DFX_K_HW_TIMEOUT;
2669
2670
2671
2672 bp->cmd_req_reg.index.comp += 1;
2673 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2674 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2675
2676
2677
2678
2679
2680
2681 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2682 {
2683 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2684 break;
2685 udelay(100);
2686 }
2687 if (timeout_cnt == 0)
2688 return DFX_K_HW_TIMEOUT;
2689
2690
2691
2692 bp->cmd_rsp_reg.index.comp += 1;
2693 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2694 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2695 return DFX_K_SUCCESS;
2696 }
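
/*
 * dfx_hw_port_ctrl_req - issue a command through the port data/control
 * registers and poll for completion, optionally returning the contents of
 * the host data register.
 */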
2732static int dfx_hw_port_ctrl_req(
2733 DFX_board_t *bp,
2734 PI_UINT32 command,
2735 PI_UINT32 data_a,
2736 PI_UINT32 data_b,
2737 PI_UINT32 *host_data
2738 )
2739
2740 {
2741 PI_UINT32 port_cmd;
2742 int timeout_cnt;
2743
2744
2745
2746 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2747
2748
2749
2750 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2751 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2752 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2753
2754
2755
2756 if (command == PI_PCTRL_M_BLAST_FLASH)
2757 timeout_cnt = 600000;
2758 else
2759 timeout_cnt = 20000;
2760
2761 for (; timeout_cnt > 0; timeout_cnt--)
2762 {
2763 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2764 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2765 break;
2766 udelay(100);
2767 }
2768 if (timeout_cnt == 0)
2769 return DFX_K_HW_TIMEOUT;
2770
2771
2772
2773
2774
2775
2776
2777 if (host_data != NULL)
2778 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2779 return DFX_K_SUCCESS;
2780 }
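
/*
 * dfx_hw_adap_reset - pulse the adapter's reset register, passing the reset
 * type in Port Data A.
 */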
2816static void dfx_hw_adap_reset(
2817 DFX_board_t *bp,
2818 PI_UINT32 type
2819 )
2820
2821 {
2822
2823
2824 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);
2825 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2826
2827
2828
2829 udelay(20);
2830
2831
2832
2833 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2834 }
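
/*
 * dfx_hw_adap_state_rd - return the adapter state field of the port status
 * register.
 */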
2864static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2865 {
2866 PI_UINT32 port_status;
2867
2868 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2869 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2870 }
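
/*
 * dfx_hw_dma_uninit - reset the adapter and wait for it to report the
 * DMA_UNAVAILABLE state.
 */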
2904static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2905 {
2906 int timeout_cnt;
2907
2908
2909
2910 dfx_hw_adap_reset(bp, type);
2911
2912
2913
2914 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2915 {
2916 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2917 break;
2918 udelay(100);
2919 }
2920 if (timeout_cnt == 0)
2921 return DFX_K_HW_TIMEOUT;
2922 return DFX_K_SUCCESS;
2923 }
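
/*
 * my_skb_align - reserve headroom so that the skb data pointer is aligned to
 * an n-byte boundary; receive buffers handed to the adapter must be 128-byte
 * aligned.
 */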
#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
	unsigned long x = (unsigned long)skb->data;
	unsigned long v;

	v = ALIGN(x, n);

	skb_reserve(skb, v - x);
}
#endif
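
/*
 * dfx_rcv_init - post the initial set of receive buffers.  With
 * DYNAMIC_BUFFERS each descriptor gets a freshly allocated, 128-byte aligned
 * and DMA-mapped skb; otherwise buffers are carved out of the preallocated
 * receive block.
 */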
2976static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2977 {
2978 int i, j;
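
	/*
	 * The receive ring has PI_RCV_DATA_K_NUM_ENTRIES descriptors but only
	 * rcv_bufs_to_post buffers are posted; the nested loops below walk
	 * every descriptor slot (index i + j), giving each slot its own skb
	 * in the dynamic case or aliasing buffer i in the static case.
	 */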
2998 if (get_buffers) {
2999#ifdef DYNAMIC_BUFFERS
3000 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3001 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3002 {
3003 struct sk_buff *newskb;
3004 dma_addr_t dma_addr;
3005
3006 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
3007 GFP_NOIO);
3008 if (!newskb)
3009 return -ENOMEM;
3010
3011
3012
3013
3014
3015 my_skb_align(newskb, 128);
3016 dma_addr = dma_map_single(bp->bus_dev,
3017 newskb->data,
3018 PI_RCV_DATA_K_SIZE_MAX,
3019 DMA_FROM_DEVICE);
3020 if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3021 dev_kfree_skb(newskb);
3022 return -ENOMEM;
3023 }
3024 bp->descr_block_virt->rcv_data[i + j].long_0 =
3025 (u32)(PI_RCV_DESCR_M_SOP |
3026 ((PI_RCV_DATA_K_SIZE_MAX /
3027 PI_ALIGN_K_RCV_DATA_BUFF) <<
3028 PI_RCV_DESCR_V_SEG_LEN));
3029 bp->descr_block_virt->rcv_data[i + j].long_1 =
3030 (u32)dma_addr;
3031
3032
3033
3034
3035
3036 bp->p_rcv_buff_va[i+j] = (char *) newskb;
3037 }
3038#else
3039 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
3040 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3041 {
3042 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
3043 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
3044 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
3045 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
3046 }
3047#endif
3048 }
3049
3050
3051
3052 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
3053 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3054 return 0;
3055 }
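/*
 * dfx_rcv_queue_process - Service completed receive frames.
 *
 * Walk the receive ring from our completion index up to the consumer index
 * written back by the adapter.  Good frames are passed up with netif_rx();
 * short frames are copied into a freshly allocated skb, while with
 * DYNAMIC_BUFFERS frames larger than SKBUFF_RX_COPYBREAK are passed up in
 * place and the descriptor is refilled with a new buffer.  Error counters
 * are updated for flushed, bad-CRC and bad-length frames.
 */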
static void dfx_rcv_queue_process(DFX_board_t *bp)
	{
	PI_TYPE_2_CONSUMER *p_type_2_cons;	/* ptr to the rcv/xmt consumer block */
	char *p_buff;				/* start of the receive buffer */
	u32 descr, pkt_len;			/* FMC descriptor word and packet length */
	struct sk_buff *skb = NULL;		/* skb handed up the stack */

	/* Service frames until our completion index catches the adapter's consumer index */

	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
	{
		dma_addr_t dma_addr;
		int entry;

		entry = bp->rcv_xmt_reg.index.rcv_comp;
#ifdef DYNAMIC_BUFFERS
		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
		p_buff = bp->p_rcv_buff_va[entry];
#endif
		/* Pick up the status/descriptor word the adapter wrote into the buffer */

		dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
		dma_sync_single_for_cpu(bp->bus_dev,
					dma_addr + RCV_BUFF_K_DESCR,
					sizeof(u32),
					DMA_FROM_DEVICE);
		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));

		/* Account for flushed frames as CRC or frame status errors */

		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
		{
			if (descr & PI_FMC_DESCR_M_RCC_CRC)
				bp->rcv_crc_errors++;
			else
				bp->rcv_frame_status_errors++;
		}
		else
		{
			int rx_in_place = 0;

			/* The frame was received without errors - verify the packet length */

			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
			pkt_len -= 4;	/* subtract the 4-byte CRC */
			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
				bp->rcv_length_errors++;
			else {
#ifdef DYNAMIC_BUFFERS
				struct sk_buff *newskb = NULL;

				if (pkt_len > SKBUFF_RX_COPYBREAK) {
					dma_addr_t new_dma_addr;

					newskb = netdev_alloc_skb(bp->dev,
								  NEW_SKB_SIZE);
					if (newskb) {
						my_skb_align(newskb, 128);
						new_dma_addr = dma_map_single(
								bp->bus_dev,
								newskb->data,
								PI_RCV_DATA_K_SIZE_MAX,
								DMA_FROM_DEVICE);
						if (dma_mapping_error(
								bp->bus_dev,
								new_dma_addr)) {
							dev_kfree_skb(newskb);
							newskb = NULL;
						}
					}
					if (newskb) {
						rx_in_place = 1;

						/* Pass the received skb up as is and refill the descriptor */
						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
						dma_unmap_single(bp->bus_dev,
								 dma_addr,
								 PI_RCV_DATA_K_SIZE_MAX,
								 DMA_FROM_DEVICE);
						skb_reserve(skb, RCV_BUFF_K_PADDING);
						bp->p_rcv_buff_va[entry] = (char *)newskb;
						bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
					}
				}
				if (!newskb)
#endif
					/* Alloc a new buffer to pass up, with room for the PRH */
					skb = netdev_alloc_skb(bp->dev,
							       pkt_len + 3);
				if (skb == NULL)
				{
					printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
					bp->rcv_discards++;
					break;
				}
				else {
					if (!rx_in_place) {
						/* Copy the frame out of the DMA buffer into the new skb */
						dma_sync_single_for_cpu(
							bp->bus_dev,
							dma_addr +
							RCV_BUFF_K_PADDING,
							pkt_len + 3,
							DMA_FROM_DEVICE);

						skb_copy_to_linear_data(skb,
							p_buff + RCV_BUFF_K_PADDING,
							pkt_len + 3);
					}

					skb_reserve(skb, 3);	/* adjust data so it points to the FC byte */
					skb_put(skb, pkt_len);	/* pass up the packet length, not including the CRC */
					skb->protocol = fddi_type_trans(skb, bp->dev);
					bp->rcv_total_bytes += skb->len;
					netif_rx(skb);

					/* Update the receive counters */
					bp->rcv_total_frames++;
					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
						bp->rcv_multicast_frames++;
				}
			}
		}

		/*
		 * Advance the producer (buffer recycling) and completion
		 * (frame servicing) indices; both are narrow bit-fields and
		 * wrap around the ring automatically.
		 */

		bp->rcv_xmt_reg.index.rcv_prod += 1;
		bp->rcv_xmt_reg.index.rcv_comp += 1;
	}
	}
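/*
 * dfx_xmt_queue_pkt - Queue one packet for transmission (ndo_start_xmit).
 *
 * Validates the LLC length, drops the frame if the link is not available,
 * prepends the three-byte packet request header (PRH), maps the frame for
 * DMA, fills in the next transmit descriptor and advances the transmit
 * producer index.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the frame
 * cannot be mapped or the descriptor ring is full.
 */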
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev)
	{
	DFX_board_t *bp = netdev_priv(dev);
	u8 prod;				/* local transmit producer index */
	PI_XMT_DESCR *p_xmt_descr;		/* ptr to transmit descriptor block entry */
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	dma_addr_t dma_addr;
	unsigned long flags;

	/*
	 * Stop the queue while the frame is queued; it is woken again below
	 * unless the descriptor ring turns out to be full.
	 */

	netif_stop_queue(dev);

	/* Drop frames whose LLC length is out of range */

	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
	{
		printk("%s: Invalid packet length - %u bytes\n",
			dev->name, skb->len);
		bp->xmt_length_errors++;
		netif_wake_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * If the link was down on the last transmit, re-read the adapter
	 * state; if it is still not LINK_AVAIL, discard the frame.
	 */

	if (bp->link_available == PI_K_FALSE)
	{
		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)
			bp->link_available = PI_K_TRUE;
		else
		{
			bp->xmt_discards++;
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
		}
	}

	/* Prepend the three-byte Packet Request Header (PRH) the adapter expects */

	skb_push(skb, 3);
	skb->data[0] = DFX_PRH0_BYTE;
	skb->data[1] = DFX_PRH1_BYTE;
	skb->data[2] = DFX_PRH2_BYTE;

	/* Map the frame, PRH included, for DMA to the adapter */

	dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(bp->bus_dev, dma_addr)) {
		skb_pull(skb, 3);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* Get the current producer index and its transmit descriptor */

	prod = bp->rcv_xmt_reg.index.xmt_prod;
	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);

	/*
	 * Also grab the matching driver descriptor, which remembers the skb
	 * so it can be freed once the adapter has consumed the frame; prod is
	 * advanced here so the ring-full test below compares the next slot
	 * against the completion index.
	 */

	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);

	/*
	 * Fill in the transmit descriptor: the whole frame sits in a single
	 * buffer, so both the start-of-packet and end-of-packet bits are set
	 * along with the segment length and the DMA address of the buffer.
	 */

	p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
	p_xmt_descr->long_1 = (u32)dma_addr;

	/*
	 * Ring-full check: if the advanced producer index has caught up with
	 * the completion index, undo the work done so far and ask the stack
	 * to requeue the packet.
	 */
	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
	{
		/* Release the DMA mapping taken above; the packet will be
		 * mapped again when the stack retries the transmit.
		 */
		dma_unmap_single(bp->bus_dev, dma_addr, skb->len,
				 DMA_TO_DEVICE);
		skb_pull(skb, 3);
		spin_unlock_irqrestore(&bp->lock, flags);
		return NETDEV_TX_BUSY;	/* requeue packet for later */
	}
	/* Remember the skb so dfx_xmt_done() can free it after transmission */

	p_xmt_drv_descr->p_skb = skb;

	/* Publish the new producer index to the adapter and restart the queue */

	bp->rcv_xmt_reg.index.xmt_prod = prod;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	spin_unlock_irqrestore(&bp->lock, flags);
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
	}
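/*
 * dfx_xmt_done - Reclaim transmit descriptors the adapter has consumed.
 *
 * For every completed entry, update the transmit counters, unmap the frame
 * and free its skb, then advance the completion index.  Returns the number
 * of entries freed.
 */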
static int dfx_xmt_done(DFX_board_t *bp)
	{
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	PI_TYPE_2_CONSUMER *p_type_2_cons;	/* ptr to the rcv/xmt consumer block */
	u8 comp;				/* local transmit completion index */
	int freed = 0;				/* frames freed */

	/* Service entries until our completion index reaches the adapter's consumer index */

	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
	{
		/* Get the driver descriptor that remembers this frame's skb */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Update the transmit counters */

		bp->xmt_total_frames++;
		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;

		/* Unmap the frame and return the skb to the kernel */

		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);

		/*
		 * Advance the completion index; it is a narrow bit-field and
		 * wraps around the ring automatically.
		 */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
		freed++;
	}
	return freed;
	}
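/*
 * dfx_rcv_flush - Release all posted receive buffers (DYNAMIC_BUFFERS only).
 *
 * Unmaps and frees every sk_buff posted to the receive ring and clears the
 * saved buffer pointers.
 */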
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp)
	{
	int i, j;

	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			struct sk_buff *skb;
			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
			if (skb) {
				dma_unmap_single(bp->bus_dev,
						 bp->descr_block_virt->rcv_data[i+j].long_1,
						 PI_RCV_DATA_K_SIZE_MAX,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bp->p_rcv_buff_va[i+j] = NULL;
		}

	}
#endif
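/*
 * dfx_xmt_flush - Discard all frames still sitting in the transmit ring.
 *
 * Every entry between the completion and producer indices is unmapped,
 * freed and counted as a transmit discard, and the consumer block is
 * updated so that its transmit index matches the producer index.
 */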
static void dfx_xmt_flush(DFX_board_t *bp)
	{
	u32 prod_cons;				/* rcv/xmt consumer block longword */
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	u8 comp;				/* local transmit completion index */

	/* Flush all outstanding transmit frames */

	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
	{
		/* Get the driver descriptor that remembers this frame's skb */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Unmap the frame and return the skb to the kernel */

		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(p_xmt_drv_descr->p_skb);

		/* The frame was never transmitted, so count it as a discard */

		bp->xmt_discards++;

		/*
		 * Advance the completion index; it is a narrow bit-field and
		 * wraps around the ring automatically.
		 */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
	}

	/* Update the consumer block so its transmit index matches the producer index */

	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
	bp->cons_block_virt->xmt_rcv_data = prod_cons;
	}
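/*
 * dfx_unregister - Tear down one adapter instance.
 *
 * Unregisters the net device, frees the DMA-coherent descriptor memory,
 * uninitializes the bus-specific state, releases the I/O and memory regions
 * claimed during probe, disables the PCI device if applicable, and finally
 * frees the net device.
 */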
static void dfx_unregister(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	DFX_board_t *bp = netdev_priv(dev);
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	resource_size_t bar_start[3] = {0};
	resource_size_t bar_len[3] = {0};
	int alloc_size;

	unregister_netdev(dev);

	/* Recompute the size of the descriptor memory allocated at probe time */
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

	dfx_bus_uninit(dev);

	dfx_get_bars(bdev, bar_start, bar_len);
	if (bar_start[2] != 0)
		release_region(bar_start[2], bar_len[2]);
	if (bar_start[1] != 0)
		release_region(bar_start[1], bar_len[1]);
	if (dfx_use_mmio) {
		iounmap(bp->base.mem);
		release_mem_region(bar_start[0], bar_len[0]);
	} else
		release_region(bar_start[0], bar_len[0]);

	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

	free_netdev(dev);
}
/* Bus-independent probe/remove wrappers used by the EISA and TC drivers */

static int __maybe_unused dfx_dev_register(struct device *);
static int __maybe_unused dfx_dev_unregister(struct device *);

#ifdef CONFIG_PCI
static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
static void dfx_pci_unregister(struct pci_dev *);

static const struct pci_device_id dfx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
	.name		= "defxx",
	.id_table	= dfx_pci_table,
	.probe		= dfx_pci_register,
	.remove		= dfx_pci_unregister,
};

static int dfx_pci_register(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	return dfx_register(&pdev->dev);
}

static void dfx_pci_unregister(struct pci_dev *pdev)
{
	dfx_unregister(&pdev->dev);
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_EISA
static struct eisa_device_id dfx_eisa_table[] = {
	{ "DEC3001", DEFEA_PROD_ID_1 },
	{ "DEC3002", DEFEA_PROD_ID_2 },
	{ "DEC3003", DEFEA_PROD_ID_3 },
	{ "DEC3004", DEFEA_PROD_ID_4 },
	{ }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

static struct eisa_driver dfx_eisa_driver = {
	.id_table	= dfx_eisa_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &eisa_bus_type,
		.probe	= dfx_dev_register,
		.remove	= dfx_dev_unregister,
	},
};
#endif /* CONFIG_EISA */

#ifdef CONFIG_TC
static struct tc_device_id const dfx_tc_table[] = {
	{ "DEC ", "PMAF-FA " },
	{ "DEC ", "PMAF-FD " },
	{ "DEC ", "PMAF-FS " },
	{ "DEC ", "PMAF-FU " },
	{ }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

static struct tc_driver dfx_tc_driver = {
	.id_table	= dfx_tc_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &tc_bus_type,
		.probe	= dfx_dev_register,
		.remove	= dfx_dev_unregister,
	},
};
#endif /* CONFIG_TC */

static int __maybe_unused dfx_dev_register(struct device *dev)
{
	int status;

	status = dfx_register(dev);
	if (!status)
		get_device(dev);
	return status;
}

static int __maybe_unused dfx_dev_unregister(struct device *dev)
{
	put_device(dev);
	dfx_unregister(dev);
	return 0;
}

/* Register the driver with every bus type it supports */

static int dfx_init(void)
{
	int status;

	status = pci_register_driver(&dfx_pci_driver);
	if (!status)
		status = eisa_driver_register(&dfx_eisa_driver);
	if (!status)
		status = tc_register_driver(&dfx_tc_driver);
	return status;
}

static void dfx_cleanup(void)
{
	tc_unregister_driver(&dfx_tc_driver);
	eisa_driver_unregister(&dfx_eisa_driver);
	pci_unregister_driver(&dfx_pci_driver);
}

module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
		   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");