1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
/* Driver version, printed as part of the probe-time banner. */
#define VERSION		"2.07"

/* Banner emitted once, when the first adapter is probed. */
static const char * const boot_msg =
	"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
	" SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69
70
71
72#include <linux/capability.h>
73#include <linux/compat.h>
74#include <linux/module.h>
75#include <linux/kernel.h>
76#include <linux/errno.h>
77#include <linux/ioport.h>
78#include <linux/interrupt.h>
79#include <linux/pci.h>
80#include <linux/netdevice.h>
81#include <linux/fddidevice.h>
82#include <linux/skbuff.h>
83#include <linux/bitops.h>
84#include <linux/gfp.h>
85
86#include <asm/byteorder.h>
87#include <asm/io.h>
88#include <linux/uaccess.h>
89
90#include "h/types.h"
91#undef ADDR
92#include "h/skfbi.h"
93#include "h/fddi.h"
94#include "h/smc.h"
95#include "h/smtstate.h"
96
97
98
99static int skfp_driver_init(struct net_device *dev);
100static int skfp_open(struct net_device *dev);
101static int skfp_close(struct net_device *dev);
102static irqreturn_t skfp_interrupt(int irq, void *dev_id);
103static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
104static void skfp_ctl_set_multicast_list(struct net_device *dev);
105static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
106static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
107static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq,
108 void __user *data, int cmd);
109static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
110 struct net_device *dev);
111static void send_queued_packets(struct s_smc *smc);
112static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
113static void ResetAdapter(struct s_smc *smc);
114
115
116
117void *mac_drv_get_space(struct s_smc *smc, u_int size);
118void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
119unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
120unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
121void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
122 int flag);
123void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
124void llc_restart_tx(struct s_smc *smc);
125void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126 int frag_count, int len);
127void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
128 int frag_count);
129void mac_drv_fill_rxd(struct s_smc *smc);
130void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
131 int frag_count);
132int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
133 int la_len);
134void dump_data(unsigned char *Data, int length);
135
136
137extern u_int mac_drv_check_space(void);
138extern int mac_drv_init(struct s_smc *smc);
139extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
140 int len, int frame_status);
141extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
142 int frame_len, int frame_status);
143extern void fddi_isr(struct s_smc *smc);
144extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
145 int len, int frame_status);
146extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
147extern void mac_drv_clear_rx_queue(struct s_smc *smc);
148extern void enable_tx_irq(struct s_smc *smc, u_short queue);
149
/* PCI IDs served by this driver: all SysKonnect FDDI (SK-FP) boards. */
static const struct pci_device_id skfddi_pci_tbl[] = {
	{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
157
158
159
/* Number of adapters configured so far; also gates the one-time banner. */
static int num_boards;

/* net_device callbacks for an skfp interface. */
static const struct net_device_ops skfp_netdev_ops = {
	.ndo_open		= skfp_open,
	.ndo_stop		= skfp_close,
	.ndo_start_xmit		= skfp_send_pkt,
	.ndo_get_stats		= skfp_ctl_get_stats,
	.ndo_set_rx_mode	= skfp_ctl_set_multicast_list,
	.ndo_set_mac_address	= skfp_ctl_set_mac_address,
	.ndo_siocdevprivate	= skfp_siocdevprivate,
};
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201static int skfp_init_one(struct pci_dev *pdev,
202 const struct pci_device_id *ent)
203{
204 struct net_device *dev;
205 struct s_smc *smc;
206 void __iomem *mem;
207 int err;
208
209 pr_debug("entering skfp_init_one\n");
210
211 if (num_boards == 0)
212 printk("%s\n", boot_msg);
213
214 err = pci_enable_device(pdev);
215 if (err)
216 return err;
217
218 err = pci_request_regions(pdev, "skfddi");
219 if (err)
220 goto err_out1;
221
222 pci_set_master(pdev);
223
224#ifdef MEM_MAPPED_IO
225 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
226 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
227 err = -EIO;
228 goto err_out2;
229 }
230
231 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
232#else
233 if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
234 printk(KERN_ERR "skfp: region is not PIO resource\n");
235 err = -EIO;
236 goto err_out2;
237 }
238
239 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
240#endif
241 if (!mem) {
242 printk(KERN_ERR "skfp: Unable to map register, "
243 "FDDI adapter will be disabled.\n");
244 err = -EIO;
245 goto err_out2;
246 }
247
248 dev = alloc_fddidev(sizeof(struct s_smc));
249 if (!dev) {
250 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
251 "FDDI adapter will be disabled.\n");
252 err = -ENOMEM;
253 goto err_out3;
254 }
255
256 dev->irq = pdev->irq;
257 dev->netdev_ops = &skfp_netdev_ops;
258
259 SET_NETDEV_DEV(dev, &pdev->dev);
260
261
262 smc = netdev_priv(dev);
263 smc->os.dev = dev;
264 smc->os.bus_type = SK_BUS_TYPE_PCI;
265 smc->os.pdev = *pdev;
266 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
267 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
268 smc->os.dev = dev;
269 smc->hw.slot = -1;
270 smc->hw.iop = mem;
271 smc->os.ResetRequested = FALSE;
272 skb_queue_head_init(&smc->os.SendSkbQueue);
273
274 dev->base_addr = (unsigned long)mem;
275
276 err = skfp_driver_init(dev);
277 if (err)
278 goto err_out4;
279
280 err = register_netdev(dev);
281 if (err)
282 goto err_out5;
283
284 ++num_boards;
285 pci_set_drvdata(pdev, dev);
286
287 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
288 (pdev->subsystem_device & 0xff00) == 0x5800)
289 printk("%s: SysKonnect FDDI PCI adapter"
290 " found (SK-%04X)\n", dev->name,
291 pdev->subsystem_device);
292 else
293 printk("%s: FDDI PCI adapter found\n", dev->name);
294
295 return 0;
296err_out5:
297 if (smc->os.SharedMemAddr)
298 dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
299 smc->os.SharedMemAddr,
300 smc->os.SharedMemDMA);
301 dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
302 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
303err_out4:
304 free_netdev(dev);
305err_out3:
306#ifdef MEM_MAPPED_IO
307 iounmap(mem);
308#else
309 ioport_unmap(mem);
310#endif
311err_out2:
312 pci_release_regions(pdev);
313err_out1:
314 pci_disable_device(pdev);
315 return err;
316}
317
318
319
320
321static void skfp_remove_one(struct pci_dev *pdev)
322{
323 struct net_device *p = pci_get_drvdata(pdev);
324 struct s_smc *lp = netdev_priv(p);
325
326 unregister_netdev(p);
327
328 if (lp->os.SharedMemAddr) {
329 dma_free_coherent(&pdev->dev,
330 lp->os.SharedMemSize,
331 lp->os.SharedMemAddr,
332 lp->os.SharedMemDMA);
333 lp->os.SharedMemAddr = NULL;
334 }
335 if (lp->os.LocalRxBuffer) {
336 dma_free_coherent(&pdev->dev,
337 MAX_FRAME_SIZE,
338 lp->os.LocalRxBuffer,
339 lp->os.LocalRxBufferDMA);
340 lp->os.LocalRxBuffer = NULL;
341 }
342#ifdef MEM_MAPPED_IO
343 iounmap(lp->hw.iop);
344#else
345 ioport_unmap(lp->hw.iop);
346#endif
347 pci_release_regions(pdev);
348 free_netdev(p);
349
350 pci_disable_device(pdev);
351}
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
/*
 * skfp_driver_init - one-time software/hardware initialization.
 *
 * Allocates the coherent DMA areas (a local receive buffer plus the
 * shared memory the hardware module manages), resets the card,
 * initializes the hardware module and copies the station address
 * into dev->dev_addr.
 *
 * Returns 0 on success, -EIO on any failure; DMA memory already
 * allocated is freed again on the error path.
 */
static int skfp_driver_init(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;
	int err = -EIO;

	pr_debug("entering skfp_driver_init\n");

	/* Remember the mapped register base. */
	bp->base_addr = dev->base_addr;

	smc->hw.irq = dev->irq;

	spin_lock_init(&bp->DriverLock);

	/* Coherent buffer for locally terminated receive frames. */
	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
					       &bp->LocalRxBufferDMA,
					       GFP_ATOMIC);
	if (!bp->LocalRxBuffer) {
		printk("could not allocate mem for ");
		printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
		goto fail;
	}

	/* Ask the hardware module how much shared memory it needs. */
	bp->SharedMemSize = mac_drv_check_space();
	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
	if (bp->SharedMemSize > 0) {
		bp->SharedMemSize += 16;	/* slack for the 16-byte alignment in mac_drv_get_desc_mem() */

		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
						       bp->SharedMemSize,
						       &bp->SharedMemDMA,
						       GFP_ATOMIC);
		if (!bp->SharedMemAddr) {
			printk("could not allocate mem for ");
			printk("hardware module: %ld byte\n",
			       bp->SharedMemSize);
			goto fail;
		}

	} else {
		bp->SharedMemAddr = NULL;
	}

	bp->SharedMemHeap = 0;	/* mac_drv_get_space() allocates from here */

	card_stop(smc);		/* reset the hardware */

	pr_debug("mac_drv_init()..\n");
	if (mac_drv_init(smc) != 0) {
		pr_debug("mac_drv_init() failed\n");
		goto fail;
	}
	/* Read the factory MAC address (canonical form) from the board. */
	read_address(smc, NULL);
	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	smt_reset_defaults(smc, 0);

	return 0;

fail:
	if (bp->SharedMemAddr) {
		dma_free_coherent(&bp->pdev.dev,
				  bp->SharedMemSize,
				  bp->SharedMemAddr,
				  bp->SharedMemDMA);
		bp->SharedMemAddr = NULL;
	}
	if (bp->LocalRxBuffer) {
		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
		bp->LocalRxBuffer = NULL;
	}
	return err;
}
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * skfp_open - .ndo_open: bring the interface up.
 *
 * Registers the (shared) interrupt handler, re-reads the station
 * address, initializes SMT and puts the station on line, resets the
 * multicast/promiscuous state, then enables the transmit queue.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int skfp_open(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	int err;

	pr_debug("entering skfp_open\n");

	/* IRQ may be shared with other devices, hence IRQF_SHARED. */
	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		return err;

	/* Reset current address to the factory MAC address. */
	read_address(smc, NULL);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	init_smt(smc, NULL);
	smt_online(smc, 1);
	STI_FBI();		/* enable board interrupts */

	/* Clear the multicast table. */
	mac_clear_multicast(smc);

	/* Start with promiscuous mode off. */
	mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);

	netif_start_queue(dev);
	return 0;
}
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
/*
 * skfp_close - .ndo_stop: take the interface down.
 *
 * Takes the station off line, stops the hardware, flushes both
 * hardware queues and the driver's private transmit backlog, and
 * releases the interrupt.
 *
 * Always returns 0.
 */
static int skfp_close(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	CLI_FBI();		/* disable board interrupts */
	smt_reset_defaults(smc, 1);
	card_stop(smc);
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	/* Drop anything still waiting in the software transmit queue. */
	skb_queue_purge(&bp->SendSkbQueue);
	bp->QueueSkb = MAX_TX_QUEUE_LEN;	/* all queue slots free again */

	return 0;
}
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
/*
 * skfp_interrupt - (shared) interrupt handler.
 *
 * First verifies that this adapter actually raised the interrupt
 * (the line may be shared), then runs the hardware-module ISR under
 * the driver spinlock and performs a deferred adapter reset if one
 * was requested from inside the ISR.
 */
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct s_smc *smc;	/* private board structure pointer */
	skfddi_priv *bp;

	smc = netdev_priv(dev);
	bp = &smc->os;

	/* Board interrupts masked off? Then it cannot be ours. */
	if (inpd(ADDR(B0_IMSK)) == 0) {
		/* not our interrupt */
		return IRQ_NONE;
	}

	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {
		/* no interrupt source we care about is pending */
		return IRQ_NONE;
	}
	CLI_FBI();		/* block further board interrupts */
	spin_lock(&bp->DriverLock);

	/* Call interrupt handler in hardware module (HWM). */
	fddi_isr(smc);

	/* The ISR may ask for a full adapter reset; do it outside fddi_isr. */
	if (smc->os.ResetRequested) {
		ResetAdapter(smc);
		smc->os.ResetRequested = FALSE;
	}
	spin_unlock(&bp->DriverLock);
	STI_FBI();		/* re-enable board interrupts */

	return IRQ_HANDLED;
}
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
672{
673 struct s_smc *bp = netdev_priv(dev);
674
675
676
677 bp->os.MacStat.port_bs_flag[0] = 0x1234;
678 bp->os.MacStat.port_bs_flag[1] = 0x5678;
679
680#if 0
681
682
683
684
685 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
686 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
687 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
688 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
689 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
690 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
691 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
692 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
693 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
694 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
695 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
696 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
697 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
698 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
699 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
700 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
701 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
702 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
703 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
704 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
705 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
706 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
707 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
708 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
709 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
710 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
711 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
712 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
713 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
714 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
715 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
716 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
717 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
718 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
719 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
720 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
721 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
722 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
723 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
724 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
725 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
726 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
727 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
728 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
729 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
730 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
731 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
732 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
733 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
734 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
735 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
736 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
737 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
738 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
739 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
740 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
741 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
742 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
743 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
744 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
745 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
746 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
747 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
748 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
749 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
750 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
751 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
752 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
753 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
754 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
755 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
756 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
757 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
758 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
759 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
760 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
761 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
762 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
763 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
764 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
765 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
766 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
767 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
768 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
769 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
770 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
771 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
772 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
773 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
774 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
775 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
776 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
777
778
779
780
781 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
782 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
783 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
784 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
785 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
786 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
787 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
788 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
789 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
790 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
791 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
792
793#endif
794 return (struct net_device_stats *)&bp->os.MacStat;
795}
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835static void skfp_ctl_set_multicast_list(struct net_device *dev)
836{
837 struct s_smc *smc = netdev_priv(dev);
838 skfddi_priv *bp = &smc->os;
839 unsigned long Flags;
840
841 spin_lock_irqsave(&bp->DriverLock, Flags);
842 skfp_ctl_set_multicast_list_wo_lock(dev);
843 spin_unlock_irqrestore(&bp->DriverLock, Flags);
844}
845
846
847
848static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
849{
850 struct s_smc *smc = netdev_priv(dev);
851 struct netdev_hw_addr *ha;
852
853
854 if (dev->flags & IFF_PROMISC) {
855 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
856 pr_debug("PROMISCUOUS MODE ENABLED\n");
857 }
858
859 else {
860 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
861 pr_debug("PROMISCUOUS MODE DISABLED\n");
862
863
864 mac_clear_multicast(smc);
865 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
866
867 if (dev->flags & IFF_ALLMULTI) {
868 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
869 pr_debug("ENABLE ALL MC ADDRESSES\n");
870 } else if (!netdev_mc_empty(dev)) {
871 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
872
873
874
875 netdev_for_each_mc_addr(ha, dev) {
876 mac_add_multicast(smc,
877 (struct fddi_addr *)ha->addr,
878 1);
879
880 pr_debug("ENABLE MC ADDRESS: %pMF\n",
881 ha->addr);
882 }
883
884 } else {
885
886 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
887 pr_debug("ENABLE ALL MC ADDRESSES\n");
888 }
889 } else {
890
891 pr_debug("DISABLE ALL MC ADDRESSES\n");
892 }
893
894
895 mac_update_multicast(smc);
896 }
897}
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
/*
 * skfp_ctl_set_mac_address - .ndo_set_mac_address callback.
 *
 * Copies the new address into dev->dev_addr and restarts the
 * adapter under the driver lock so SMT picks it up.
 *
 * NOTE(review): the address is not validated here; presumably the
 * networking core guarantees a well-formed sockaddr -- confirm.
 *
 * Always returns 0.
 */
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
{
	struct s_smc *smc = netdev_priv(dev);
	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
	skfddi_priv *bp = &smc->os;
	unsigned long Flags;

	/* Store the new address and reset to make it take effect. */
	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
	spin_lock_irqsave(&bp->DriverLock, Flags);
	ResetAdapter(smc);
	spin_unlock_irqrestore(&bp->DriverLock, Flags);

	return 0;
}
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
/*
 * skfp_siocdevprivate - private device ioctls.
 *
 * Supported commands (in ioc.cmd):
 *   SKFP_GET_STATS - copy driver statistics to user space
 *   SKFP_CLR_STATS - zero the statistics (needs CAP_NET_ADMIN)
 *
 * Compat (32-bit userland on 64-bit kernel) calls are rejected --
 * struct s_skfp_ioctl carries a user pointer (ioc.data), so its
 * layout differs between ABIs.
 *
 * Returns 0, -EFAULT, -EPERM or -EOPNOTSUPP.
 */
static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *lp = &smc->os;
	struct s_skfp_ioctl ioc;
	int status = 0;

	if (copy_from_user(&ioc, data, sizeof(struct s_skfp_ioctl)))
		return -EFAULT;

	if (in_compat_syscall())
		return -EOPNOTSUPP;

	switch (ioc.cmd) {
	case SKFP_GET_STATS:	/* copy the statistics out */
		ioc.len = sizeof(lp->MacStat);
		status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
				? -EFAULT : 0;
		break;
	case SKFP_CLR_STATS:	/* zero the statistics */
		if (!capable(CAP_NET_ADMIN)) {
			status = -EPERM;
		} else {
			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
		}
		break;
	default:
		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
		status = -EOPNOTSUPP;

	}

	return status;
}
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
/*
 * skfp_send_pkt - .ndo_start_xmit: queue one frame for transmission.
 *
 * Frames outside the valid LLC length range are counted as tx_errors
 * and dropped.  Valid frames go onto the driver's software queue
 * (bounded by QueueSkb free slots) and send_queued_packets() pushes
 * as much as possible to the hardware.  When the last slot is taken
 * the net queue is stopped; llc_restart_tx() wakes it up again.
 */
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	pr_debug("skfp_send_pkt\n");

	/* The length must lie between the minimum and maximum LLC
	 * frame sizes; anything else is a malformed request. */
	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
		bp->MacStat.gen.tx_errors++;	/* count the drop */
		/* NOTE(review): restarting the queue here looks odd but
		 * appears deliberate -- a bad skb is not congestion. */
		netif_start_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* "handled": skb consumed */
	}
	if (bp->QueueSkb == 0) {	/* no free slot in the software queue */

		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* stack will retry later */
	}
	bp->QueueSkb--;
	skb_queue_tail(&bp->SendSkbQueue, skb);
	send_queued_packets(netdev_priv(dev));
	if (bp->QueueSkb == 0) {
		netif_stop_queue(dev);
	}
	return NETDEV_TX_OK;

}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
/*
 * send_queued_packets - push frames from the software queue to the
 * hardware until the queue empties or tx resources run out.
 *
 * The driver spinlock is taken per frame.  On resource shortage the
 * frame is put back at the head of the queue and we return;
 * llc_restart_tx() retries when the hardware frees resources.
 */
static void send_queued_packets(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char fc;	/* frame control byte */
	int queue;		/* tx queue: synchronous or async0 */
	struct s_smt_fp_txd *txd;
	dma_addr_t dma_address;
	unsigned long Flags;

	int frame_status;	/* result flags from hwm_tx_init() */

	pr_debug("send queued packets\n");
	for (;;) {
		/* Take the next frame off the software queue. */
		skb = skb_dequeue(&bp->SendSkbQueue);

		if (!skb) {
			pr_debug("queue empty\n");
			return;
		}

		spin_lock_irqsave(&bp->DriverLock, Flags);
		fc = skb->data[0];	/* FC byte selects the queue */
		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
		/* Adjust the sync bit of LLC frames according to the
		 * negotiated synchronous bandwidth (ESS). */
		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
			/* No sync bandwidth available: force async. */
			if (!smc->ess.sync_bw_available)
				fc &= ~FC_SYNC_BIT;

			else {
				/* Bandwidth available: honor the MIB mode. */
				if (smc->mib.fddiESSSynchTxMode) {

					fc |= FC_SYNC_BIT;
				}
			}
		}
#endif
		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);

		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
			/* Hardware cannot take the frame right now;
			 * log the reason at debug level. */
			if ((frame_status & RING_DOWN) != 0) {

				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
					 bp->dev->name);
			}

			/* Requeue at the head so ordering is preserved;
			 * llc_restart_tx() will retry this frame. */
			skb_queue_head(&bp->SendSkbQueue, skb);
			spin_unlock_irqrestore(&bp->DriverLock, Flags);
			return;

		}

		bp->QueueSkb++;	/* one free slot in the software queue */

		/* Source address must be our hardware address. */
		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);

		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);

		dma_address = dma_map_single(&(&bp->pdev)->dev, skb->data,
					     skb->len, DMA_TO_DEVICE);
		if (frame_status & LAN_TX) {
			/* Save skb + mapping for mac_drv_tx_complete(). */
			txd->txd_os.skb = skb;
			txd->txd_os.dma_addr = dma_address;
		}
		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
			    frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);

		if (!(frame_status & LAN_TX)) {	/* local-only frame */
			dma_unmap_single(&(&bp->pdev)->dev, dma_address,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_irq(skb);
		}
		spin_unlock_irqrestore(&bp->DriverLock, Flags);
	}

	return;		/* never reached */

}
1197
1198
1199
1200
1201
1202
1203
1204
1205
/*
 * CheckSourceAddress - overwrite the frame's source address with our
 * hardware address.
 *
 * Only rewrites individual (unicast) source addresses: the address
 * starts at frame[1 + 6] (after the FC byte and 6-byte destination).
 * The routing-information (SR) bit of the original first source byte
 * is preserved across the rewrite.
 *
 * @frame:   start of the MAC frame (frame[0] is the FC byte)
 * @hw_addr: our hardware address, ETH_ALEN bytes
 */
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char SRBit;

	/* Anything but 0x00/0x01 in the first source byte is not an
	 * individual address we should rewrite. */
	if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0)
		return;
	if ((unsigned short) frame[1 + 10] != 0)
		return;
	SRBit = frame[1 + 6] & 0x01;
	memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
	/* FIX: restore the SR bit into the first source-address byte,
	 * frame[1 + 6]; it was previously OR-ed into frame[8], i.e.
	 * the second source byte, corrupting the address. */
	frame[1 + 6] |= SRBit;
}
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
/*
 * ResetAdapter - restart the adapter after a fatal error or an
 * address change.  Caller holds the driver spinlock.
 *
 * Stops the card, flushes both hardware queues, resets SMT to its
 * defaults, re-initializes SMT with the current dev_addr and brings
 * the station back on line, finally re-programming the multicast
 * filters (lock-free variant, since the lock is already held).
 */
static void ResetAdapter(struct s_smc *smc)
{

	pr_debug("[fddi: ResetAdapter]\n");

	/* Stop the hardware. */
	card_stop(smc);

	/* Throw away all queued frames in both directions. */
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	/* Restart SMT with the (possibly new) station address. */
	smt_reset_defaults(smc, 1);

	init_smt(smc, (smc->os.dev)->dev_addr);

	smt_online(smc, 1);
	STI_FBI();		/* re-enable board interrupts */

	/* Restore the multicast/promiscuous configuration. */
	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
/*
 * llc_restart_tx - hardware-module callback: transmit resources are
 * available again.
 *
 * Called with the driver lock held; it is dropped around
 * send_queued_packets() because that function acquires the lock
 * itself for each frame.  Afterwards the net queue is woken.
 */
void llc_restart_tx(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;

	pr_debug("[llc_restart_tx]\n");

	/* Try to send the queued packets. */
	spin_unlock(&bp->DriverLock);
	send_queued_packets(smc);
	spin_lock(&bp->DriverLock);
	netif_start_queue(bp->dev);	/* wake the transmit queue */

}
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
/*
 * mac_drv_get_space - bump allocator over the coherent shared-memory
 * region.
 *
 * Hands out @size bytes at the current heap offset and advances the
 * heap.  There is no free(): the whole region lives as long as the
 * adapter.  Returns NULL when the request would overflow the region.
 */
void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
	void *virt;

	pr_debug("mac_drv_get_space (%d bytes), ", size);
	virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);

	if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
		printk("Unexpected SMT memory size requested: %d\n", size);
		return NULL;
	}
	smc->os.SharedMemHeap += size;	/* next allocation starts here */

	pr_debug("mac_drv_get_space end\n");
	pr_debug("virt addr: %lx\n", (ulong) virt);
	pr_debug("bus addr: %lx\n", (ulong)
		 (smc->os.SharedMemDMA +
		  ((char *) virt - (char *)smc->os.SharedMemAddr)));
	return virt;
}
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/*
 * mac_drv_get_desc_mem - allocate descriptor memory aligned to a
 * 16-byte boundary.
 *
 * Grabs @size bytes from the shared-memory heap, then additionally
 * consumes the 0..15 byte gap needed to round the next address up to
 * a 16-byte boundary and returns that aligned address.
 *
 * NOTE(review): the first mac_drv_get_space() result is not checked
 * for NULL; presumably mac_drv_check_space() sized the region so
 * descriptor allocations cannot fail -- confirm.
 */
void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
{

	char *virt;

	pr_debug("mac_drv_get_desc_mem\n");

	/* Descriptor memory must be 16-byte aligned. */
	virt = mac_drv_get_space(smc, size);

	size = (u_int) (16 - (((unsigned long) virt) & 15UL));
	size = size % 16;	/* gap of 0..15 bytes to the next boundary */

	pr_debug("Allocate %u bytes alignment gap ", size);
	pr_debug("for descriptor memory.\n");

	/* Consume the gap so the next caller starts aligned too. */
	if (!mac_drv_get_space(smc, size)) {
		printk("fddi: Unable to align descriptor memory.\n");
		return NULL;
	}
	return virt + size;
}
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1386{
1387 return smc->os.SharedMemDMA +
1388 ((char *) virt - (char *)smc->os.SharedMemAddr);
1389}
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1421{
1422 return smc->os.SharedMemDMA +
1423 ((char *) virt - (char *)smc->os.SharedMemAddr);
1424}
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
/*
 * dma_complete - hardware-module callback: the DMA transfer for
 * @descr has finished.
 *
 * For receive (DMA_WR) descriptors the streaming mapping that was
 * created when the buffer was queued is torn down here so the CPU
 * may safely read the data.  Transmit descriptors need no work in
 * this hook: their mappings are released in mac_drv_tx_complete().
 */
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
{
	/* Only receive-direction (device writes to memory) transfers
	 * need unmapping here. */
	if (flag & DMA_WR) {
		skfddi_priv *bp = &smc->os;
		volatile struct s_smt_fp_rxd *r = &descr->r;

		/* A NULL skb means the local buffer was used - nothing
		 * to unmap in that case. */
		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
			int MaxFrameSize = bp->MaxFrameSize;

			dma_unmap_single(&(&bp->pdev)->dev,
					 r->rxd_os.dma_addr, MaxFrameSize,
					 DMA_FROM_DEVICE);
			r->rxd_os.dma_addr = 0;	/* mark as unmapped */
		}
	}
}
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
/*
 * mac_drv_tx_complete - hardware-module callback: the frame attached
 * to @txd has been transmitted.
 *
 * Releases the skb's DMA mapping, bumps the tx counters and frees
 * the skb with the IRQ-safe variant (we run in interrupt context).
 */
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
	struct sk_buff *skb;

	pr_debug("entering mac_drv_tx_complete\n");
	/* Only TXDs that carry an skb need completion work. */
	if (!(skb = txd->txd_os.skb)) {
		pr_debug("TXD with no skb assigned.\n");
		return;
	}
	txd->txd_os.skb = NULL;

	/* Release the streaming DMA mapping set up at send time. */
	dma_unmap_single(&(&smc->os.pdev)->dev, txd->txd_os.dma_addr,
			 skb->len, DMA_TO_DEVICE);
	txd->txd_os.dma_addr = 0;

	smc->os.MacStat.gen.tx_packets++;	/* count transmitted packets */
	smc->os.MacStat.gen.tx_bytes+=skb->len;	/* count transmitted bytes */

	/* Free the skb (interrupt context). */
	dev_kfree_skb_irq(skb);

	pr_debug("leaving mac_drv_tx_complete\n");
}
1519
1520
1521
1522
1523
1524
1525
/* Debug helper: hex-dump the first 64 bytes of a frame to the kernel
 * log.  Compiled in only when DUMPPACKETS is defined; otherwise the
 * macro below makes every call site a no-op.
 */
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
	printk(KERN_INFO "---Packet start---\n");
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
	printk(KERN_INFO "------------------\n");
}
#else
#define dump_data(data,len)
#endif
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
/* Receive-complete callback from the hardware module.
 *
 * Hands a received frame up to the network stack.  If the frame
 * carries a routing information field (RIF) right after the MAC
 * header, the RIF is stripped by sliding the MAC header forward over
 * it before delivery.  On any error the descriptor is re-queued and
 * the frame is counted as an rx_error.
 *
 * smc        - adapter context
 * rxd        - completed receive descriptor (owns the skb)
 * frag_count - fragments in the frame; this driver only queues single
 *              fragments, so anything other than 1 is unexpected
 * len        - received frame length in bytes
 */
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count, int len)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char *virt, *cp;
	unsigned short ri;
	u_int RifLength;

	pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
	if (frag_count != 1) {
		/* This driver always queues single-fragment buffers. */
		printk("fddi: Multi-fragment receive!\n");
		goto RequeueRxd;

	}
	skb = rxd->rxd_os.skb;
	if (!skb) {
		pr_debug("No skb in rxd\n");
		smc->os.MacStat.gen.rx_errors++;
		goto RequeueRxd;
	}
	virt = skb->data;

	/* No-op unless DUMPPACKETS is defined. */
	dump_data(skb->data, len);

	/* Byte 7 is the first byte of the source address; its RII bit
	 * signals that a routing information field follows the header.
	 */
	if ((virt[1 + 6] & FDDI_RII) == 0)
		RifLength = 0;
	else {
		int n;

		pr_debug("RIF found\n");
		/* RIF length is encoded in the routing control field,
		 * the first 16 bits after the MAC header.
		 */
		cp = virt + FDDI_MAC_HDR_LEN;

		ri = ntohs(*((__be16 *) cp));
		RifLength = ri & FDDI_RCF_LEN_MASK;
		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
			printk("fddi: Invalid RIF.\n");
			goto RequeueRxd;

		}
		virt[1 + 6] &= ~FDDI_RII;

		/* Slide the MAC header forward over the RIF, copying
		 * backwards so overlapping bytes are handled correctly.
		 */
		virt = cp + RifLength;
		for (n = FDDI_MAC_HDR_LEN; n; n--)
			*--virt = *--cp;
		/* Drop the now-dead leading bytes from the skb. */
		skb_pull(skb, RifLength);
		len -= RifLength;
		RifLength = 0;
	}

	/* Update receive statistics. */
	smc->os.MacStat.gen.rx_packets++;

	smc->os.MacStat.gen.rx_bytes+=len;

	/* Group bit in the destination address => multicast frame. */
	if (virt[1] & 0x01) {

		smc->os.MacStat.gen.multicast++;
	}

	/* Ownership of the skb passes to the network stack. */
	rxd->rxd_os.skb = NULL;
	skb_trim(skb, len);
	skb->protocol = fddi_type_trans(skb, bp->dev);

	netif_rx(skb);

	/* Refill receive descriptors if we are running low. */
	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
	return;

      RequeueRxd:
	pr_debug("Rx: re-queue RXD.\n");
	mac_drv_requeue_rxd(smc, rxd, frag_count);
	smc->os.MacStat.gen.rx_errors++;

}
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
/* Re-queue a receive descriptor after an error or dropped frame.
 *
 * Re-uses the descriptor's existing skb when it still has one;
 * otherwise allocates a fresh buffer.  If allocation fails, the
 * permanently mapped local scratch buffer is queued instead so the
 * adapter always has somewhere to DMA (such frames are discarded on
 * completion because rxd_os.skb stays NULL).
 *
 * smc        - adapter context
 * rxd        - first descriptor of the frame to re-queue
 * frag_count - number of fragments; this driver only uses 1
 */
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	if (frag_count != 1)
		/* Unexpected: buffers are always queued as one fragment. */
		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		/* Remember the source chain before we switch to the
		 * current hardware descriptor for re-queuing.
		 */
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {
			/* No buffer to re-use: allocate a new one. */
			pr_debug("Requeue with no skb in rxd!\n");
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				/* 3 bytes headroom — presumably for IP
				 * header alignment; matches
				 * mac_drv_fill_rxd().
				 */
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = dma_map_single(&(&smc->os.pdev)->dev,
							v_addr, MaxFrameSize,
							DMA_FROM_DEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				/* Out of memory: fall back to the local
				 * scratch buffer (frame will be dropped).
				 */
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			/* Re-map and re-queue the existing skb. */
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr,
						MaxFrameSize, DMA_FROM_DEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		/* Hand the buffer back to the hardware as one fragment. */
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
}
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754void mac_drv_fill_rxd(struct s_smc *smc)
1755{
1756 int MaxFrameSize;
1757 unsigned char *v_addr;
1758 unsigned long b_addr;
1759 struct sk_buff *skb;
1760 volatile struct s_smt_fp_rxd *rxd;
1761
1762 pr_debug("entering mac_drv_fill_rxd\n");
1763
1764
1765
1766
1767 MaxFrameSize = smc->os.MaxFrameSize;
1768
1769 while (HWM_GET_RX_FREE(smc) > 0) {
1770 pr_debug(".\n");
1771
1772 rxd = HWM_GET_CURR_RXD(smc);
1773 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1774 if (skb) {
1775
1776 skb_reserve(skb, 3);
1777 skb_put(skb, MaxFrameSize);
1778 v_addr = skb->data;
1779 b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr,
1780 MaxFrameSize, DMA_FROM_DEVICE);
1781 rxd->rxd_os.dma_addr = b_addr;
1782 } else {
1783
1784
1785
1786
1787
1788 pr_debug("Queueing invalid buffer!\n");
1789 v_addr = smc->os.LocalRxBuffer;
1790 b_addr = smc->os.LocalRxBufferDMA;
1791 }
1792
1793 rxd->rxd_os.skb = skb;
1794
1795
1796 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1797 FIRST_FRAG | LAST_FRAG);
1798 }
1799 pr_debug("leaving mac_drv_fill_rxd\n");
1800}
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1820 int frag_count)
1821{
1822
1823 struct sk_buff *skb;
1824
1825 pr_debug("entering mac_drv_clear_rxd\n");
1826
1827 if (frag_count != 1)
1828
1829 printk("fddi: Multi-fragment clear!\n");
1830
1831 for (; frag_count > 0; frag_count--) {
1832 skb = rxd->rxd_os.skb;
1833 if (skb != NULL) {
1834 skfddi_priv *bp = &smc->os;
1835 int MaxFrameSize = bp->MaxFrameSize;
1836
1837 dma_unmap_single(&(&bp->pdev)->dev,
1838 rxd->rxd_os.dma_addr, MaxFrameSize,
1839 DMA_FROM_DEVICE);
1840
1841 dev_kfree_skb(skb);
1842 rxd->rxd_os.skb = NULL;
1843 }
1844 rxd = rxd->rxd_next;
1845
1846 }
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1878 char *look_ahead, int la_len)
1879{
1880 struct sk_buff *skb;
1881
1882 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1883
1884
1885
1886 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1887 pr_debug("fddi: Discard invalid local SMT frame\n");
1888 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1889 len, la_len, (unsigned long) look_ahead);
1890 return 0;
1891 }
1892 skb = alloc_skb(len + 3, GFP_ATOMIC);
1893 if (!skb) {
1894 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1895 return 0;
1896 }
1897 skb_reserve(skb, 3);
1898 skb_put(skb, len);
1899 skb_copy_to_linear_data(skb, look_ahead, len);
1900
1901
1902 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1903 netif_rx(skb);
1904
1905 return 0;
1906}
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
/* SMT timer service hook required by the common SMT module.
 * Intentionally a no-op in this driver.
 */
void smt_timer_poll(struct s_smc *smc)
{
}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942void ring_status_indication(struct s_smc *smc, u_long status)
1943{
1944 pr_debug("ring_status_indication( ");
1945 if (status & RS_RES15)
1946 pr_debug("RS_RES15 ");
1947 if (status & RS_HARDERROR)
1948 pr_debug("RS_HARDERROR ");
1949 if (status & RS_SOFTERROR)
1950 pr_debug("RS_SOFTERROR ");
1951 if (status & RS_BEACON)
1952 pr_debug("RS_BEACON ");
1953 if (status & RS_PATHTEST)
1954 pr_debug("RS_PATHTEST ");
1955 if (status & RS_SELFTEST)
1956 pr_debug("RS_SELFTEST ");
1957 if (status & RS_RES9)
1958 pr_debug("RS_RES9 ");
1959 if (status & RS_DISCONNECT)
1960 pr_debug("RS_DISCONNECT ");
1961 if (status & RS_RES7)
1962 pr_debug("RS_RES7 ");
1963 if (status & RS_DUPADDR)
1964 pr_debug("RS_DUPADDR ");
1965 if (status & RS_NORINGOP)
1966 pr_debug("RS_NORINGOP ");
1967 if (status & RS_VERSION)
1968 pr_debug("RS_VERSION ");
1969 if (status & RS_STUCKBYPASSS)
1970 pr_debug("RS_STUCKBYPASSS ");
1971 if (status & RS_EVENT)
1972 pr_debug("RS_EVENT ");
1973 if (status & RS_RINGOPCHANGE)
1974 pr_debug("RS_RINGOPCHANGE ");
1975 if (status & RS_RES0)
1976 pr_debug("RS_RES0 ");
1977 pr_debug("]\n");
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
/* Return the current system time for the SMT state machines, measured
 * in jiffies (kernel timer ticks).
 */
unsigned long smt_get_time(void)
{
	return jiffies;
}
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016void smt_stat_counter(struct s_smc *smc, int stat)
2017{
2018
2019
2020 pr_debug("smt_stat_counter\n");
2021 switch (stat) {
2022 case 0:
2023 pr_debug("Ring operational change.\n");
2024 break;
2025 case 1:
2026 pr_debug("Receive fifo overflow.\n");
2027 smc->os.MacStat.gen.rx_errors++;
2028 break;
2029 default:
2030 pr_debug("Unknown status (%d).\n", stat);
2031 break;
2032 }
2033}
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
/* Trace transitions of the Configuration Management (CFM) state
 * machine.  Compiled to a no-op unless DRIVERDEBUG is defined.
 */
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	char *state_name;

	switch (c_state) {
	case SC0_ISOLATED:
		state_name = "SC0_ISOLATED";
		break;
	case SC1_WRAP_A:
		state_name = "SC1_WRAP_A";
		break;
	case SC2_WRAP_B:
		state_name = "SC2_WRAP_B";
		break;
	case SC4_THRU_A:
		state_name = "SC4_THRU_A";
		break;
	case SC5_THRU_B:
		state_name = "SC5_THRU_B";
		break;
	case SC7_WRAP_S:
		state_name = "SC7_WRAP_S";
		break;
	case SC9_C_WRAP_A:
		state_name = "SC9_C_WRAP_A";
		break;
	case SC10_C_WRAP_B:
		state_name = "SC10_C_WRAP_B";
		break;
	case SC11_C_WRAP_S:
		state_name = "SC11_C_WRAP_S";
		break;
	default:
		/* Unknown state: report the raw value and bail out. */
		pr_debug("cfm_state_change: unknown %d\n", c_state);
		return;
	}
	pr_debug("cfm_state_change: %s\n", state_name);
#endif
}
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
/* Trace transitions of the Entity Coordination Management (ECM) state
 * machine.  Compiled to a no-op unless DRIVERDEBUG is defined.
 */
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	char *state_name;

	switch (e_state) {
	case EC0_OUT:
		state_name = "EC0_OUT";
		break;
	case EC1_IN:
		state_name = "EC1_IN";
		break;
	case EC2_TRACE:
		state_name = "EC2_TRACE";
		break;
	case EC3_LEAVE:
		state_name = "EC3_LEAVE";
		break;
	case EC4_PATH_TEST:
		state_name = "EC4_PATH_TEST";
		break;
	case EC5_INSERT:
		state_name = "EC5_INSERT";
		break;
	case EC6_CHECK:
		state_name = "EC6_CHECK";
		break;
	case EC7_DEINSERT:
		state_name = "EC7_DEINSERT";
		break;
	default:
		state_name = "unknown";
		break;
	}
	pr_debug("ecm_state_change: %s\n", state_name);
#endif
}
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
/* Trace transitions of the Ring Management (RMT) state machine.
 * Compiled to a no-op unless DRIVERDEBUG is defined.
 */
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	char *state_name;

	switch (r_state) {
	case RM0_ISOLATED:
		state_name = "RM0_ISOLATED";
		break;
	case RM1_NON_OP:
		state_name = "RM1_NON_OP - not operational";
		break;
	case RM2_RING_OP:
		state_name = "RM2_RING_OP - ring operational";
		break;
	case RM3_DETECT:
		state_name = "RM3_DETECT - detect dupl addresses";
		break;
	case RM4_NON_OP_DUP:
		state_name = "RM4_NON_OP_DUP - dupl. addr detected";
		break;
	case RM5_RING_OP_DUP:
		state_name = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
		break;
	case RM6_DIRECTED:
		state_name = "RM6_DIRECTED - sending directed beacons";
		break;
	case RM7_TRACE:
		state_name = "RM7_TRACE - trace initiated";
		break;
	default:
		state_name = "unknown";
		break;
	}
	pr_debug("[rmt_state_change: %s]\n", state_name);
#endif
}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
/* Callback from the common code when the adapter requires a reset.
 * Only records the request here; the actual reset is presumably
 * performed later by whoever consumes os.ResetRequested (not visible
 * in this part of the file).
 */
void drv_reset_indication(struct s_smc *smc)
{
	pr_debug("entering drv_reset_indication\n");

	smc->os.ResetRequested = TRUE;

}
2224
/* PCI glue: device-ID table plus probe/remove hooks (skfp_init_one and
 * skfp_remove_one are defined elsewhere in this file).
 * module_pci_driver() generates the module init/exit boilerplate.
 */
static struct pci_driver skfddi_pci_driver = {
	.name = "skfddi",
	.id_table = skfddi_pci_tbl,
	.probe = skfp_init_one,
	.remove = skfp_remove_one,
};

module_pci_driver(skfddi_pci_driver);
2233