1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
/* Driver version, reported in the banner printed when the first board
 * is probed. */
#define VERSION "2.07"

static const char * const boot_msg =
	"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
	"  SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
69
70
71
72#include <linux/capability.h>
73#include <linux/module.h>
74#include <linux/kernel.h>
75#include <linux/errno.h>
76#include <linux/ioport.h>
77#include <linux/interrupt.h>
78#include <linux/pci.h>
79#include <linux/netdevice.h>
80#include <linux/fddidevice.h>
81#include <linux/skbuff.h>
82#include <linux/bitops.h>
83#include <linux/gfp.h>
84
85#include <asm/byteorder.h>
86#include <asm/io.h>
87#include <linux/uaccess.h>
88
89#include "h/types.h"
90#undef ADDR
91#include "h/skfbi.h"
92#include "h/fddi.h"
93#include "h/smc.h"
94#include "h/smtstate.h"
95
96
97
/* Driver-local entry points: net_device callbacks, the ISR and helpers. */
static int skfp_driver_init(struct net_device *dev);
static int skfp_open(struct net_device *dev);
static int skfp_close(struct net_device *dev);
static irqreturn_t skfp_interrupt(int irq, void *dev_id);
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
static void skfp_ctl_set_multicast_list(struct net_device *dev);
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
				 struct net_device *dev);
static void send_queued_packets(struct s_smc *smc);
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
static void ResetAdapter(struct s_smc *smc);

/* Functions exported to the hardware module (HWM); called back from the
 * common SMT/HWM code. */
void *mac_drv_get_space(struct s_smc *smc, u_int size);
void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
		  int flag);
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
void llc_restart_tx(struct s_smc *smc);
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count, int len);
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count);
void mac_drv_fill_rxd(struct s_smc *smc);
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
		       int frag_count);
int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
		    int la_len);
void dump_data(unsigned char *Data, int length);

/* Functions the HWM provides to this driver. */
extern u_int mac_drv_check_space(void);
extern int mac_drv_init(struct s_smc *smc);
extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
			int len, int frame_status);
extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
		       int frame_len, int frame_status);
extern void fddi_isr(struct s_smc *smc);
extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
			int len, int frame_status);
extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
147
/* PCI IDs this driver binds to: all SysKonnect FDDI boards with the
 * SK-NET FDDI chipset, any subsystem vendor/device. */
static const struct pci_device_id skfddi_pci_tbl[] = {
	{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
155
156
157
/* Number of adapters successfully configured so far; also used to print
 * the boot banner only once. */
static int num_boards;

/* net_device callback table wiring the stack into this driver. */
static const struct net_device_ops skfp_netdev_ops = {
	.ndo_open		= skfp_open,
	.ndo_stop		= skfp_close,
	.ndo_start_xmit		= skfp_send_pkt,
	.ndo_get_stats		= skfp_ctl_get_stats,
	.ndo_set_rx_mode	= skfp_ctl_set_multicast_list,
	.ndo_set_mac_address	= skfp_ctl_set_mac_address,
	.ndo_do_ioctl		= skfp_ioctl,
};
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199static int skfp_init_one(struct pci_dev *pdev,
200 const struct pci_device_id *ent)
201{
202 struct net_device *dev;
203 struct s_smc *smc;
204 void __iomem *mem;
205 int err;
206
207 pr_debug("entering skfp_init_one\n");
208
209 if (num_boards == 0)
210 printk("%s\n", boot_msg);
211
212 err = pci_enable_device(pdev);
213 if (err)
214 return err;
215
216 err = pci_request_regions(pdev, "skfddi");
217 if (err)
218 goto err_out1;
219
220 pci_set_master(pdev);
221
222#ifdef MEM_MAPPED_IO
223 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
224 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
225 err = -EIO;
226 goto err_out2;
227 }
228
229 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
230#else
231 if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
232 printk(KERN_ERR "skfp: region is not PIO resource\n");
233 err = -EIO;
234 goto err_out2;
235 }
236
237 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
238#endif
239 if (!mem) {
240 printk(KERN_ERR "skfp: Unable to map register, "
241 "FDDI adapter will be disabled.\n");
242 err = -EIO;
243 goto err_out2;
244 }
245
246 dev = alloc_fddidev(sizeof(struct s_smc));
247 if (!dev) {
248 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
249 "FDDI adapter will be disabled.\n");
250 err = -ENOMEM;
251 goto err_out3;
252 }
253
254 dev->irq = pdev->irq;
255 dev->netdev_ops = &skfp_netdev_ops;
256
257 SET_NETDEV_DEV(dev, &pdev->dev);
258
259
260 smc = netdev_priv(dev);
261 smc->os.dev = dev;
262 smc->os.bus_type = SK_BUS_TYPE_PCI;
263 smc->os.pdev = *pdev;
264 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
265 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
266 smc->os.dev = dev;
267 smc->hw.slot = -1;
268 smc->hw.iop = mem;
269 smc->os.ResetRequested = FALSE;
270 skb_queue_head_init(&smc->os.SendSkbQueue);
271
272 dev->base_addr = (unsigned long)mem;
273
274 err = skfp_driver_init(dev);
275 if (err)
276 goto err_out4;
277
278 err = register_netdev(dev);
279 if (err)
280 goto err_out5;
281
282 ++num_boards;
283 pci_set_drvdata(pdev, dev);
284
285 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
286 (pdev->subsystem_device & 0xff00) == 0x5800)
287 printk("%s: SysKonnect FDDI PCI adapter"
288 " found (SK-%04X)\n", dev->name,
289 pdev->subsystem_device);
290 else
291 printk("%s: FDDI PCI adapter found\n", dev->name);
292
293 return 0;
294err_out5:
295 if (smc->os.SharedMemAddr)
296 dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
297 smc->os.SharedMemAddr,
298 smc->os.SharedMemDMA);
299 dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
300 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
301err_out4:
302 free_netdev(dev);
303err_out3:
304#ifdef MEM_MAPPED_IO
305 iounmap(mem);
306#else
307 ioport_unmap(mem);
308#endif
309err_out2:
310 pci_release_regions(pdev);
311err_out1:
312 pci_disable_device(pdev);
313 return err;
314}
315
316
317
318
/*
 * PCI remove callback: tear down one board in the reverse order of
 * skfp_init_one() — unregister from the stack first so no new I/O can
 * arrive, then free DMA memory, unmap registers and disable the device.
 */
static void skfp_remove_one(struct pci_dev *pdev)
{
	struct net_device *p = pci_get_drvdata(pdev);
	struct s_smc *lp = netdev_priv(p);

	unregister_netdev(p);

	/* Free the HWM shared-memory area, if one was allocated. */
	if (lp->os.SharedMemAddr) {
		dma_free_coherent(&pdev->dev,
				  lp->os.SharedMemSize,
				  lp->os.SharedMemAddr,
				  lp->os.SharedMemDMA);
		lp->os.SharedMemAddr = NULL;
	}
	/* Free the local receive buffer. */
	if (lp->os.LocalRxBuffer) {
		dma_free_coherent(&pdev->dev,
				  MAX_FRAME_SIZE,
				  lp->os.LocalRxBuffer,
				  lp->os.LocalRxBufferDMA);
		lp->os.LocalRxBuffer = NULL;
	}
#ifdef MEM_MAPPED_IO
	iounmap(lp->hw.iop);
#else
	ioport_unmap(lp->hw.iop);
#endif
	pci_release_regions(pdev);
	free_netdev(p);

	pci_disable_device(pdev);
}
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
/*
 * One-time hardware/driver initialization for a board: allocates the
 * DMA-coherent local receive buffer and the HWM shared-memory area,
 * resets the adapter, runs the hardware-module init and reads the MAC
 * address into the net_device.
 *
 * Returns 0 on success, -EIO on any failure; on failure both DMA
 * allocations are released again.
 */
static int skfp_driver_init(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;
	int err = -EIO;

	pr_debug("entering skfp_driver_init\n");

	/* Mirror bus info already gathered by the probe routine. */
	bp->base_addr = dev->base_addr;

	smc->hw.irq = dev->irq;

	spin_lock_init(&bp->DriverLock);

	/* Allocate a one-frame DMA buffer used as a local receive scratch
	 * area by the hardware module. */
	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
					       &bp->LocalRxBufferDMA,
					       GFP_ATOMIC);
	if (!bp->LocalRxBuffer) {
		printk("could not allocate mem for ");
		printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
		goto fail;
	}

	/* Ask the HWM how much shared memory it needs, then allocate it. */
	bp->SharedMemSize = mac_drv_check_space();
	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
	if (bp->SharedMemSize > 0) {
		/* Extra 16 bytes of slack for descriptor alignment
		 * (see mac_drv_get_desc_mem()). */
		bp->SharedMemSize += 16;

		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
						       bp->SharedMemSize,
						       &bp->SharedMemDMA,
						       GFP_ATOMIC);
		if (!bp->SharedMemAddr) {
			printk("could not allocate mem for ");
			printk("hardware module: %ld byte\n",
			       bp->SharedMemSize);
			goto fail;
		}

	} else {
		bp->SharedMemAddr = NULL;
	}

	/* Offset of the bump allocator over the shared area; see
	 * mac_drv_get_space(). */
	bp->SharedMemHeap = 0;

	card_stop(smc);		/* Reset adapter. */

	pr_debug("mac_drv_init()..\n");
	if (mac_drv_init(smc) != 0) {
		pr_debug("mac_drv_init() failed\n");
		goto fail;
	}
	/* Read the canonical MAC address from the board EEPROM. */
	read_address(smc, NULL);
	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	smt_reset_defaults(smc, 0);

	return 0;

fail:
	/* Release whatever was allocated before the failure. */
	if (bp->SharedMemAddr) {
		dma_free_coherent(&bp->pdev.dev,
				  bp->SharedMemSize,
				  bp->SharedMemAddr,
				  bp->SharedMemDMA);
		bp->SharedMemAddr = NULL;
	}
	if (bp->LocalRxBuffer) {
		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
		bp->LocalRxBuffer = NULL;
	}
	return err;
}
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
/*
 * ndo_open: bring the interface up — grab the (shared) IRQ, re-read the
 * MAC address, initialize SMT, put the ring online and start the
 * transmit queue.  Returns 0 or the errno from request_irq().
 */
static int skfp_open(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	int err;

	pr_debug("entering skfp_open\n");

	/* Register the interrupt handler; IRQF_SHARED because the line may
	 * be shared with other devices. */
	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		return err;

	/* Refresh the hardware address in case it changed while closed. */
	read_address(smc, NULL);
	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);

	init_smt(smc, NULL);
	smt_online(smc, 1);
	STI_FBI();		/* enable board interrupts */

	/* Clear the local multicast filter tables. */
	mac_clear_multicast(smc);

	/* Start in non-promiscuous mode. */
	mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);

	netif_start_queue(dev);
	return 0;
}
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
/*
 * ndo_stop: take the interface down — disable interrupts, take the ring
 * offline, flush the hardware queues, release the IRQ and drop any
 * packets still waiting in the software transmit queue.
 */
static int skfp_close(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	CLI_FBI();		/* disable board interrupts */
	smt_reset_defaults(smc, 1);
	card_stop(smc);
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	/* Discard queued skbs and restore the free-slot counter. */
	skb_queue_purge(&bp->SendSkbQueue);
	bp->QueueSkb = MAX_TX_QUEUE_LEN;

	return 0;
}
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
/*
 * Interrupt handler.  The IRQ line may be shared, so first check the
 * board's interrupt mask and source registers and return IRQ_NONE if
 * this interrupt is not ours; otherwise dispatch to the hardware
 * module's ISR under the driver lock and perform a deferred adapter
 * reset if one was requested.
 */
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct s_smc *smc;
	skfddi_priv *bp;

	smc = netdev_priv(dev);
	bp = &smc->os;

	/* All board interrupts masked => cannot be ours. */
	if (inpd(ADDR(B0_IMSK)) == 0) {

		return IRQ_NONE;
	}

	/* No enabled interrupt source pending => not ours either. */
	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {

		return IRQ_NONE;
	}
	CLI_FBI();		/* block further board interrupts */
	spin_lock(&bp->DriverLock);

	/* Hand off to the hardware module's interrupt service routine. */
	fddi_isr(smc);

	/* The HWM may have flagged that the adapter needs a reset. */
	if (smc->os.ResetRequested) {
		ResetAdapter(smc);
		smc->os.ResetRequested = FALSE;
	}
	spin_unlock(&bp->DriverLock);
	STI_FBI();		/* re-enable board interrupts */

	return IRQ_HANDLED;
}
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
670{
671 struct s_smc *bp = netdev_priv(dev);
672
673
674
675 bp->os.MacStat.port_bs_flag[0] = 0x1234;
676 bp->os.MacStat.port_bs_flag[1] = 0x5678;
677
678#if 0
679
680
681
682
683 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
684 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
685 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
686 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
687 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
688 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
689 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
690 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
691 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
692 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
693 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
694 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
695 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
696 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
697 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
698 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
699 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
700 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
701 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
702 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
703 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
704 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
705 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
706 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
707 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
708 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
709 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
710 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
711 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
712 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
713 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
714 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
715 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
716 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
717 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
718 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
719 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
720 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
721 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
722 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
723 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
724 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
725 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
726 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
727 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
728 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
729 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
730 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
731 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
732 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
733 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
734 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
735 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
736 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
737 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
738 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
739 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
740 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
741 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
742 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
743 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
744 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
745 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
746 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
747 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
748 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
749 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
750 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
751 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
752 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
753 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
754 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
755 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
756 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
757 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
758 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
759 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
760 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
761 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
762 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
763 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
764 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
765 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
766 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
767 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
768 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
769 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
770 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
771 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
772 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
773 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
774 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
775
776
777
778
779 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
780 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
781 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
782 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
783 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
784 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
785 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
786 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
787 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
788 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
789 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
790
791#endif
792 return (struct net_device_stats *)&bp->os.MacStat;
793}
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833static void skfp_ctl_set_multicast_list(struct net_device *dev)
834{
835 struct s_smc *smc = netdev_priv(dev);
836 skfddi_priv *bp = &smc->os;
837 unsigned long Flags;
838
839 spin_lock_irqsave(&bp->DriverLock, Flags);
840 skfp_ctl_set_multicast_list_wo_lock(dev);
841 spin_unlock_irqrestore(&bp->DriverLock, Flags);
842}
843
844
845
/*
 * Apply the interface's receive-mode flags to the adapter.  Must be
 * called with the driver lock held (see the locked wrapper above).
 *
 * Promiscuous mode overrides everything else; otherwise the multicast
 * filter is rebuilt from scratch: all-multi if IFF_ALLMULTI is set or
 * the list exceeds the hardware limit (FPMAX_MULTICAST), else each
 * address is programmed individually.
 */
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	/* Enable promiscuous mode, if necessary. */
	if (dev->flags & IFF_PROMISC) {
		mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
		pr_debug("PROMISCUOUS MODE ENABLED\n");
	}
	/* Else turn promiscuous mode off and rebuild the multicast filter. */
	else {
		mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
		pr_debug("PROMISCUOUS MODE DISABLED\n");

		/* Reset all MC addresses before re-adding. */
		mac_clear_multicast(smc);
		mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);

		if (dev->flags & IFF_ALLMULTI) {
			mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
			pr_debug("ENABLE ALL MC ADDRESSES\n");
		} else if (!netdev_mc_empty(dev)) {
			if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
				/* The list fits into the hardware filter:
				 * add each address individually. */
				netdev_for_each_mc_addr(ha, dev) {
					mac_add_multicast(smc,
						(struct fddi_addr *)ha->addr,
						1);

					pr_debug("ENABLE MC ADDRESS: %pMF\n",
						 ha->addr);
				}

			} else {
				/* Too many addresses: fall back to
				 * receiving all multicast frames. */
				mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
				pr_debug("ENABLE ALL MC ADDRESSES\n");
			}
		} else {
			/* Empty list: nothing to add. */
			pr_debug("DISABLE ALL MC ADDRESSES\n");
		}

		/* Push the rebuilt filter to the hardware. */
		mac_update_multicast(smc);
	}
}
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/*
 * ndo_set_mac_address: store the new address and restart the adapter
 * under the driver lock so SMT comes back up using it.  Always returns
 * 0 — no validation is performed on the supplied address.
 */
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
{
	struct s_smc *smc = netdev_priv(dev);
	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
	skfddi_priv *bp = &smc->os;
	unsigned long Flags;

	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
	spin_lock_irqsave(&bp->DriverLock, Flags);
	ResetAdapter(smc);	/* re-inits SMT with dev->dev_addr */
	spin_unlock_irqrestore(&bp->DriverLock, Flags);

	return 0;
}
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
958{
959 struct s_smc *smc = netdev_priv(dev);
960 skfddi_priv *lp = &smc->os;
961 struct s_skfp_ioctl ioc;
962 int status = 0;
963
964 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
965 return -EFAULT;
966
967 switch (ioc.cmd) {
968 case SKFP_GET_STATS:
969 ioc.len = sizeof(lp->MacStat);
970 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
971 ? -EFAULT : 0;
972 break;
973 case SKFP_CLR_STATS:
974 if (!capable(CAP_NET_ADMIN)) {
975 status = -EPERM;
976 } else {
977 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
978 }
979 break;
980 default:
981 printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
982 status = -EOPNOTSUPP;
983
984 }
985
986 return status;
987}
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
/*
 * ndo_start_xmit: queue one skb for transmission.
 *
 * Frames outside the legal LLC length range are dropped (counted as tx
 * errors) but reported as NETDEV_TX_OK so the stack does not retry.
 * Otherwise the skb goes onto the software SendSkbQueue (bounded by
 * QueueSkb free slots) and send_queued_packets() pushes it to the
 * hardware; the queue is stopped when the last slot is taken.
 */
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	pr_debug("skfp_send_pkt\n");

	/* Reject frames shorter than the minimum or longer than the
	 * maximum LLC frame length. */
	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
		bp->MacStat.gen.tx_errors++;	/* bump error counter */
		/* Keep the queue running; this skb is simply dropped. */
		netif_start_queue(dev);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* return "success" */
	}
	if (bp->QueueSkb == 0) {	/* no free slot left in the queue */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	bp->QueueSkb--;
	skb_queue_tail(&bp->SendSkbQueue, skb);
	send_queued_packets(netdev_priv(dev));
	if (bp->QueueSkb == 0) {
		netif_stop_queue(dev);
	}
	return NETDEV_TX_OK;

}
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
/*
 * Drain the software transmit queue into the hardware.
 *
 * For each queued skb: pick the hardware queue from the frame-control
 * byte, ask the HWM for transmit resources (hwm_tx_init), DMA-map the
 * data and hand it to hwm_tx_frag().  If the HWM has no resources the
 * skb is requeued at the head and the loop stops.  Each iteration
 * holds the driver lock only for the hardware interaction, not while
 * dequeuing.
 */
static void send_queued_packets(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char fc;	/* frame control byte */
	int queue;		/* hardware queue: sync or async-0 */
	struct s_smt_fp_txd *txd;	/* Current TxD. */
	dma_addr_t dma_address;
	unsigned long Flags;

	int frame_status;	/* HWM tx frame status. */

	pr_debug("send queued packets\n");
	for (;;) {
		skb = skb_dequeue(&bp->SendSkbQueue);

		if (!skb) {
			pr_debug("queue empty\n");
			return;
		}		/* queue empty */

		spin_lock_irqsave(&bp->DriverLock, Flags);
		fc = skb->data[0];
		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
		/* ESS bandwidth allocator: adjust the sync bit of LLC
		 * frames according to the negotiated sync bandwidth. */
		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
			/* No sync bandwidth available: send async. */
			if (!smc->ess.sync_bw_available)
				fc &= ~FC_SYNC_BIT;

			else {
				/* Sync bandwidth is available. */
				if (smc->mib.fddiESSSynchTxMode) {
					/* Send all LLC frames sync. */
					fc |= FC_SYNC_BIT;
				}
			}
		}
#endif				/* ESS */
		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);

		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
			/* Unable to send the frame right now. */

			if ((frame_status & RING_DOWN) != 0) {
				/* Ring is down. */
				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
					 bp->dev->name);
			}

			/* Note: We will retry the operation as soon as
			 * transmit resources become available. */
			skb_queue_head(&bp->SendSkbQueue, skb);
			spin_unlock_irqrestore(&bp->DriverLock, Flags);
			return;	/* Packet has been queued. */

		}		/* if (unable to send frame) */

		bp->QueueSkb++;	/* one packet less in local queue */

		/* Source address must be our own (possibly keeping the
		 * routing bit); fix it up in place. */
		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);

		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);

		dma_address = pci_map_single(&bp->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);
		if (frame_status & LAN_TX) {
			/* Remember the skb and mapping; released in
			 * mac_drv_tx_complete(). */
			txd->txd_os.skb = skb;			/* save skb */
			txd->txd_os.dma_addr = dma_address;	/* save dma mapping */
		}
		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
			    frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);

		if (!(frame_status & LAN_TX)) {		/* local only frame */
			pci_unmap_single(&bp->pdev, dma_address,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
		}
		spin_unlock_irqrestore(&bp->DriverLock, Flags);
	}			/* for */

	return;			/* never reached */

}
1192
1193
1194
1195
1196
1197
1198
1199
1200
/*
 * Ensure an outgoing frame carries our own hardware address as source.
 *
 * The source address starts at frame[7] (one FC byte + 6-byte
 * destination).  The frame is only patched when the current source
 * address is effectively empty — first byte is 0x00 or 0x01 (only the
 * routing bit may be set) and the last byte is zero.  The routing bit
 * is preserved by OR-ing it back into frame[8] after the copy, exactly
 * as the original code did.
 */
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char routing_bit;

	/* Any bit other than the routing bit set => address already
	 * filled in, leave it alone. */
	if ((frame[1 + 6] & ~0x01) != 0)
		return;
	if (frame[1 + 10] != 0)
		return;

	routing_bit = frame[1 + 6] & 0x01;
	memcpy(&frame[1 + 6], hw_addr, 6);
	frame[8] |= routing_bit;
}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/*
 * Full adapter restart: stop the card, flush both hardware queues,
 * reset SMT to defaults, re-initialize it with the current device
 * address and bring the ring back online.  Caller must hold the driver
 * lock (see skfp_ctl_set_mac_address / skfp_interrupt).
 */
static void ResetAdapter(struct s_smc *smc)
{

	pr_debug("[fddi: ResetAdapter]\n");

	/* Stop the adapter. */

	card_stop(smc);		/* Stop all activity. */

	/* Clear the transmit and receive descriptor queues. */
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	/* Restart the adapter. */

	smt_reset_defaults(smc, 1);	/* Initialize the SMT module. */

	init_smt(smc, (smc->os.dev)->dev_addr);	/* use current MAC address */

	smt_online(smc, 1);	/* Insert into the ring again. */
	STI_FBI();

	/* Restore the receive mode (multicast filter, promisc flags). */
	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
/*
 * HWM callback: transmit resources have become available again.
 *
 * Called with the driver lock held (from the ISR path), so the lock is
 * deliberately dropped around send_queued_packets() — which takes it
 * itself — and re-acquired before returning to the caller.
 */
void llc_restart_tx(struct s_smc *smc)
{
	skfddi_priv *bp = &smc->os;

	pr_debug("[llc_restart_tx]\n");

	/* Try to send queued packets; temporarily release the lock the
	 * caller holds, since send_queued_packets() acquires it. */
	spin_unlock(&bp->DriverLock);
	send_queued_packets(smc);
	spin_lock(&bp->DriverLock);
	netif_start_queue(bp->dev);	/* wake the stack's queue */

}
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
/*
 * HWM callback: bump allocator over the DMA-coherent shared-memory
 * area allocated in skfp_driver_init().  Returns a virtual pointer to
 * 'size' bytes, or NULL when the area is exhausted.  Memory obtained
 * here is never freed individually; the whole area is released on
 * remove.
 */
void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
	void *virt;

	pr_debug("mac_drv_get_space (%d bytes), ", size);
	virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);

	/* Bounds check before committing the allocation. */
	if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
		printk("Unexpected SMT memory size requested: %d\n", size);
		return NULL;
	}
	smc->os.SharedMemHeap += size;	/* advance the bump pointer */

	pr_debug("mac_drv_get_space end\n");
	pr_debug("virt addr: %lx\n", (ulong) virt);
	pr_debug("bus addr: %lx\n", (ulong)
		 (smc->os.SharedMemDMA +
		  ((char *) virt - (char *)smc->os.SharedMemAddr)));
	return virt;
}
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1343{
1344
1345 char *virt;
1346
1347 pr_debug("mac_drv_get_desc_mem\n");
1348
1349
1350
1351 virt = mac_drv_get_space(smc, size);
1352
1353 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1354 size = size % 16;
1355
1356 pr_debug("Allocate %u bytes alignment gap ", size);
1357 pr_debug("for descriptor memory.\n");
1358
1359 if (!mac_drv_get_space(smc, size)) {
1360 printk("fddi: Unable to align descriptor memory.\n");
1361 return NULL;
1362 }
1363 return virt + size;
1364}
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1381{
1382 return smc->os.SharedMemDMA +
1383 ((char *) virt - (char *)smc->os.SharedMemAddr);
1384}
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
/*
 * HWM callback: return the bus address for a buffer inside the shared
 * memory area.  The area is DMA-coherent, so no per-transfer mapping
 * or syncing is needed here ('len' and 'flag' are unused); this is a
 * pure virtual-to-bus translation like mac_drv_virt2phys().
 */
u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
{
	return smc->os.SharedMemDMA +
	       ((char *) virt - (char *)smc->os.SharedMemAddr);
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
/*
 * HWM callback: a DMA transfer for the given descriptor has finished.
 *
 * Only receive (DMA_WR, i.e. device-to-memory) completions need work
 * here: the streaming mapping created for the rx skb is torn down so
 * the CPU sees the DMA'd data.  Transmit mappings are released in
 * mac_drv_tx_complete() instead.
 */
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
{
	/* For TXDs the mapping stays alive until mac_drv_tx_complete(). */
	if (flag & DMA_WR) {
		skfddi_priv *bp = &smc->os;
		volatile struct s_smt_fp_rxd *r = &descr->r;

		/* If the RxD has an skb with an active DMA mapping,
		 * unmap it now that the transfer is complete. */
		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
			int MaxFrameSize = bp->MaxFrameSize;

			pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
					 MaxFrameSize, PCI_DMA_FROMDEVICE);
			r->rxd_os.dma_addr = 0;	/* mark as unmapped */
		}
	}
}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
/*
 * HWM callback: a frame has been transmitted.  Releases the DMA
 * mapping created in send_queued_packets(), updates the tx counters
 * and frees the skb (IRQ-safe variant — we may be in interrupt
 * context).
 */
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
	struct sk_buff *skb;

	pr_debug("entering mac_drv_tx_complete\n");

	/* A TXD without an skb means it was a local-only frame already
	 * freed in send_queued_packets(); nothing to do. */
	if (!(skb = txd->txd_os.skb)) {
		pr_debug("TXD with no skb assigned.\n");
		return;
	}
	txd->txd_os.skb = NULL;

	/* Release the DMA mapping saved in send_queued_packets(). */
	pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
			 skb->len, PCI_DMA_TODEVICE);
	txd->txd_os.dma_addr = 0;

	smc->os.MacStat.gen.tx_packets++;	/* Count transmitted packets. */
	smc->os.MacStat.gen.tx_bytes+=skb->len;	/* Count bytes. */

	/* Free the skb (IRQ-context-safe). */
	dev_kfree_skb_irq(skb);

	pr_debug("leaving mac_drv_tx_complete\n");
}
1513
1514
1515
1516
1517
1518
1519
/*
 * dump_data - hex-dump the start of a packet for debugging.
 *
 * Compiled in only when DUMPPACKETS is defined; otherwise the macro
 * below makes every call site expand to nothing.
 */
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
	printk(KERN_INFO "---Packet start---\n");
	/* At most 64 bytes, 16 bytes per line, no address prefix. */
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
	printk(KERN_INFO "------------------\n");
}
#else
#define dump_data(data,len)
#endif
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/*
 * mac_drv_rx_complete - handle a completely received frame.
 *
 * Strips any routing information field (RIF) from the frame, updates
 * the receive statistics and hands the skb to the networking stack.
 * On error the receive descriptor is re-queued so its buffer can be
 * reused.
 *
 * smc        - adapter context
 * rxd        - descriptor of the received frame
 * frag_count - number of buffer fragments (this driver expects 1)
 * len        - frame length in bytes
 */
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count, int len)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char *virt, *cp;
	unsigned short ri;
	u_int RifLength;

	pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
	if (frag_count != 1) {
		/* This driver always receives frames in one fragment. */
		printk("fddi: Multi-fragment receive!\n");
		goto RequeueRxd;
	}
	skb = rxd->rxd_os.skb;
	if (!skb) {
		pr_debug("No skb in rxd\n");
		smc->os.MacStat.gen.rx_errors++;
		goto RequeueRxd;
	}
	virt = skb->data;

	/* Optional packet dump (DUMPPACKETS builds only). */
	dump_data(skb->data, len);

	/*
	 * virt[1 + 6] is the first octet of the source address (the
	 * header is FC byte + 6-byte DA + 6-byte SA).  If its RII bit
	 * is set, a routing information field follows the MAC header
	 * and must be removed before the frame is passed up.
	 */
	if ((virt[1 + 6] & FDDI_RII) == 0)
		RifLength = 0;
	else {
		int n;

		pr_debug("RIF found\n");
		/* cp points just behind the MAC header, at the RIF's
		 * routing control field. */
		cp = virt + FDDI_MAC_HDR_LEN;

		ri = ntohs(*((__be16 *) cp));
		RifLength = ri & FDDI_RCF_LEN_MASK;
		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
			/* RIF claims to be longer than the frame. */
			printk("fddi: Invalid RIF.\n");
			goto RequeueRxd;
		}
		virt[1 + 6] &= ~FDDI_RII;	/* clear RII bit */

		/* Move the MAC header forward over the RIF.  The
		 * regions overlap, so copy backwards byte by byte. */
		virt = cp + RifLength;
		for (n = FDDI_MAC_HDR_LEN; n; n--)
			*--virt = *--cp;
		/* virt now points to the relocated MAC header. */
		skb_pull(skb, RifLength);
		len -= RifLength;
		RifLength = 0;
	}

	/* Update receive statistics. */
	smc->os.MacStat.gen.rx_packets++;

	smc->os.MacStat.gen.rx_bytes+=len;

	/* Group-address bit of the destination address set? */
	if (virt[1] & 0x01) {

		smc->os.MacStat.gen.multicast++;
	}

	/* Detach the skb from the descriptor and hand it up. */
	rxd->rxd_os.skb = NULL;
	skb_trim(skb, len);
	skb->protocol = fddi_type_trans(skb, bp->dev);

	netif_rx(skb);

	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
	return;

      RequeueRxd:
	pr_debug("Rx: re-queue RXD.\n");
	mac_drv_requeue_rxd(smc, rxd, frag_count);
	smc->os.MacStat.gen.rx_errors++;
}
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
/*
 * mac_drv_requeue_rxd - hand a receive buffer back to the hardware.
 *
 * Called when a received frame was dropped.  The descriptor's buffer
 * is re-armed so it can receive again.  If the descriptor lost its
 * skb, a new one is allocated; if even that fails, the driver-local
 * fallback buffer is queued so the receive ring keeps running.
 *
 * smc        - adapter context
 * rxd        - first descriptor to requeue
 * frag_count - number of fragments (this driver expects 1)
 */
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	if (frag_count != 1)
		/* Should not happen: rx buffers are single-fragment. */
		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {
			/* skb was consumed elsewhere: allocate and map
			 * a fresh receive buffer. */
			pr_debug("Requeue with no skb in rxd!\n");
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				/* 3 bytes headroom, matching the
				 * reserve in mac_drv_fill_rxd. */
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = pci_map_single(&smc->os.pdev,
							v_addr,
							MaxFrameSize,
							PCI_DMA_FROMDEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				/* Out of memory: queue the local
				 * fallback buffer so the ring is not
				 * starved. */
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			/* Reuse the existing skb; re-map it for DMA. */
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		/* Give the buffer back to the hardware module. */
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
}
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
/*
 * mac_drv_fill_rxd - fill all free receive descriptors with buffers.
 *
 * For every free slot in the rx ring an skb of MaxFrameSize bytes is
 * allocated, mapped for DMA and handed to the hardware module.  When
 * allocation fails, the driver-local fallback buffer is queued
 * instead (with rxd_os.skb == NULL) so the ring stays populated.
 */
void mac_drv_fill_rxd(struct s_smc *smc)
{
	int MaxFrameSize;
	unsigned char *v_addr;
	unsigned long b_addr;
	struct sk_buff *skb;
	volatile struct s_smt_fp_rxd *rxd;

	pr_debug("entering mac_drv_fill_rxd\n");

	MaxFrameSize = smc->os.MaxFrameSize;

	/* Keep queueing buffers until the rx ring is full. */
	while (HWM_GET_RX_FREE(smc) > 0) {
		pr_debug(".\n");

		rxd = HWM_GET_CURR_RXD(smc);
		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
		if (skb) {
			/* 3 bytes headroom, consistent with
			 * mac_drv_requeue_rxd. */
			skb_reserve(skb, 3);
			skb_put(skb, MaxFrameSize);
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		} else {
			/* No memory: queue the shared local buffer so
			 * the hardware still has somewhere to write;
			 * frames landing here are discarded later. */
			pr_debug("Queueing invalid buffer!\n");
			v_addr = smc->os.LocalRxBuffer;
			b_addr = smc->os.LocalRxBufferDMA;
		}

		rxd->rxd_os.skb = skb;

		/* Pass the buffer to the hardware module. */
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);
	}
	pr_debug("leaving mac_drv_fill_rxd\n");
}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1819 int frag_count)
1820{
1821
1822 struct sk_buff *skb;
1823
1824 pr_debug("entering mac_drv_clear_rxd\n");
1825
1826 if (frag_count != 1)
1827
1828 printk("fddi: Multi-fragment clear!\n");
1829
1830 for (; frag_count > 0; frag_count--) {
1831 skb = rxd->rxd_os.skb;
1832 if (skb != NULL) {
1833 skfddi_priv *bp = &smc->os;
1834 int MaxFrameSize = bp->MaxFrameSize;
1835
1836 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1837 MaxFrameSize, PCI_DMA_FROMDEVICE);
1838
1839 dev_kfree_skb(skb);
1840 rxd->rxd_os.skb = NULL;
1841 }
1842 rxd = rxd->rxd_next;
1843
1844 }
1845}
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
/*
 * mac_drv_rx_init - deliver a local SMT frame to the stack.
 *
 * The frame must fit entirely into the look-ahead buffer; it is
 * copied into a freshly allocated skb and passed to netif_rx().
 *
 * smc        - adapter context
 * len        - frame length
 * fc         - frame control byte (unused here)
 * look_ahead - pointer to the frame data
 * la_len     - number of valid bytes in the look-ahead buffer
 *
 * Always returns 0 (frames that cannot be delivered are dropped).
 */
int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
		    char *look_ahead, int la_len)
{
	struct sk_buff *skb;

	pr_debug("entering mac_drv_rx_init(len=%d)\n", len);

	/* Sanity-check: the whole frame must be in the look-ahead
	 * buffer and at least a MAC header long. */
	if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
		pr_debug("fddi: Discard invalid local SMT frame\n");
		pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
			len, la_len, (unsigned long) look_ahead);
		return 0;
	}
	skb = alloc_skb(len + 3, GFP_ATOMIC);
	if (!skb) {
		pr_debug("fddi: Local SMT: skb memory exhausted.\n");
		return 0;
	}
	/* 3 bytes headroom, then copy the frame into the skb. */
	skb_reserve(skb, 3);
	skb_put(skb, len);
	skb_copy_to_linear_data(skb, look_ahead, len);

	/* Hand the frame to the networking stack. */
	skb->protocol = fddi_type_trans(skb, smc->os.dev);
	netif_rx(skb);

	return 0;
}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
/*
 * smt_timer_poll - SMT timer polling hook.
 *
 * Intentionally a no-op in this driver; timer handling is done
 * elsewhere (presumably interrupt-driven — the SMT core just needs
 * the symbol to exist).
 */
void smt_timer_poll(struct s_smc *smc)
{
}
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940void ring_status_indication(struct s_smc *smc, u_long status)
1941{
1942 pr_debug("ring_status_indication( ");
1943 if (status & RS_RES15)
1944 pr_debug("RS_RES15 ");
1945 if (status & RS_HARDERROR)
1946 pr_debug("RS_HARDERROR ");
1947 if (status & RS_SOFTERROR)
1948 pr_debug("RS_SOFTERROR ");
1949 if (status & RS_BEACON)
1950 pr_debug("RS_BEACON ");
1951 if (status & RS_PATHTEST)
1952 pr_debug("RS_PATHTEST ");
1953 if (status & RS_SELFTEST)
1954 pr_debug("RS_SELFTEST ");
1955 if (status & RS_RES9)
1956 pr_debug("RS_RES9 ");
1957 if (status & RS_DISCONNECT)
1958 pr_debug("RS_DISCONNECT ");
1959 if (status & RS_RES7)
1960 pr_debug("RS_RES7 ");
1961 if (status & RS_DUPADDR)
1962 pr_debug("RS_DUPADDR ");
1963 if (status & RS_NORINGOP)
1964 pr_debug("RS_NORINGOP ");
1965 if (status & RS_VERSION)
1966 pr_debug("RS_VERSION ");
1967 if (status & RS_STUCKBYPASSS)
1968 pr_debug("RS_STUCKBYPASSS ");
1969 if (status & RS_EVENT)
1970 pr_debug("RS_EVENT ");
1971 if (status & RS_RINGOPCHANGE)
1972 pr_debug("RS_RINGOPCHANGE ");
1973 if (status & RS_RES0)
1974 pr_debug("RS_RES0 ");
1975 pr_debug("]\n");
1976}
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
/*
 * smt_get_time - return the current time for the SMT module,
 * expressed in jiffies (kernel clock ticks).
 */
unsigned long smt_get_time(void)
{
	return jiffies;
}
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014void smt_stat_counter(struct s_smc *smc, int stat)
2015{
2016
2017
2018 pr_debug("smt_stat_counter\n");
2019 switch (stat) {
2020 case 0:
2021 pr_debug("Ring operational change.\n");
2022 break;
2023 case 1:
2024 pr_debug("Receive fifo overflow.\n");
2025 smc->os.MacStat.gen.rx_errors++;
2026 break;
2027 default:
2028 pr_debug("Unknown status (%d).\n", stat);
2029 break;
2030 }
2031}
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
/*
 * cfm_state_change - log a configuration management (CFM) state
 * change.  Debug builds only; compiles to an empty function
 * otherwise.
 */
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	static const struct {
		int state;
		const char *name;
	} names[] = {
		{ SC0_ISOLATED,   "SC0_ISOLATED" },
		{ SC1_WRAP_A,     "SC1_WRAP_A" },
		{ SC2_WRAP_B,     "SC2_WRAP_B" },
		{ SC4_THRU_A,     "SC4_THRU_A" },
		{ SC5_THRU_B,     "SC5_THRU_B" },
		{ SC7_WRAP_S,     "SC7_WRAP_S" },
		{ SC9_C_WRAP_A,   "SC9_C_WRAP_A" },
		{ SC10_C_WRAP_B,  "SC10_C_WRAP_B" },
		{ SC11_C_WRAP_S,  "SC11_C_WRAP_S" },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		if (names[i].state == c_state) {
			pr_debug("cfm_state_change: %s\n", names[i].name);
			return;
		}
	}
	pr_debug("cfm_state_change: unknown %d\n", c_state);
#endif
}
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
/*
 * ecm_state_change - log an entity coordination management (ECM)
 * state change.  Debug builds only.
 */
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	static const struct {
		int state;
		const char *name;
	} names[] = {
		{ EC0_OUT,       "EC0_OUT" },
		{ EC1_IN,        "EC1_IN" },
		{ EC2_TRACE,     "EC2_TRACE" },
		{ EC3_LEAVE,     "EC3_LEAVE" },
		{ EC4_PATH_TEST, "EC4_PATH_TEST" },
		{ EC5_INSERT,    "EC5_INSERT" },
		{ EC6_CHECK,     "EC6_CHECK" },
		{ EC7_DEINSERT,  "EC7_DEINSERT" },
	};
	const char *s = "unknown";
	size_t i;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		if (names[i].state == e_state) {
			s = names[i].name;
			break;
		}
	}
	pr_debug("ecm_state_change: %s\n", s);
#endif
}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
/*
 * rmt_state_change - log a ring management (RMT) state change.
 * Debug builds only.
 */
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	static const struct {
		int state;
		const char *name;
	} names[] = {
		{ RM0_ISOLATED,    "RM0_ISOLATED" },
		{ RM1_NON_OP,      "RM1_NON_OP - not operational" },
		{ RM2_RING_OP,     "RM2_RING_OP - ring operational" },
		{ RM3_DETECT,      "RM3_DETECT - detect dupl addresses" },
		{ RM4_NON_OP_DUP,  "RM4_NON_OP_DUP - dupl. addr detected" },
		{ RM5_RING_OP_DUP, "RM5_RING_OP_DUP - ring oper. with dupl. addr" },
		{ RM6_DIRECTED,    "RM6_DIRECTED - sending directed beacons" },
		{ RM7_TRACE,       "RM7_TRACE - trace initiated" },
	};
	const char *s = "unknown";
	size_t i;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		if (names[i].state == r_state) {
			s = names[i].name;
			break;
		}
	}
	pr_debug("[rmt_state_change: %s]\n", s);
#endif
}
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
/*
 * drv_reset_indication - called by the SMT module when the adapter
 * needs a reset.  Only sets a flag here; the actual reset is
 * performed later (outside this function) when the flag is checked.
 */
void drv_reset_indication(struct s_smc *smc)
{
	pr_debug("entering drv_reset_indication\n");

	smc->os.ResetRequested = TRUE;

}
2222
/*
 * PCI driver glue: device-ID table plus probe/remove callbacks
 * (defined earlier in this file).
 */
static struct pci_driver skfddi_pci_driver = {
	.name = "skfddi",
	.id_table = skfddi_pci_tbl,
	.probe = skfp_init_one,
	.remove = skfp_remove_one,
};

/* Registers the driver at module load and unregisters it at unload. */
module_pci_driver(skfddi_pci_driver);
2231