1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173#include <linux/module.h>
174#include <linux/init.h>
175#include <linux/ioport.h>
176#include <linux/eisa.h>
177#include <linux/pci.h>
178#include <linux/dma-mapping.h>
179#include <linux/netdevice.h>
180#include <linux/etherdevice.h>
181#include <linux/delay.h>
182#include <linux/spinlock.h>
183#include <linux/workqueue.h>
184#include <linux/mii.h>
185
186#include "tlan.h"
187
188typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
189
190
191
192static struct net_device *TLan_Eisa_Devices;
193
194static int TLanDevicesInstalled;
195
196
197static int aui[MAX_TLAN_BOARDS];
198static int duplex[MAX_TLAN_BOARDS];
199static int speed[MAX_TLAN_BOARDS];
200static int boards_found;
201module_param_array(aui, int, NULL, 0);
202module_param_array(duplex, int, NULL, 0);
203module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
206MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
207
208MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
209MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
210MODULE_LICENSE("GPL");
211
212
213
214#undef MONITOR
215
216
217static int debug;
218module_param(debug, int, 0);
219MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
220
221static const char TLanSignature[] = "TLAN";
222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
223static int tlan_have_pci;
224static int tlan_have_eisa;
225
226static const char *media[] = {
227 "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
228 "100baseTx-FD", "100baseT4", NULL
229};
230
/* Per-adapter capability table.  PCI boards index it through
 * tlan_pci_tbl[].driver_data; the EISA probe hard-codes entries 13/14.
 * addrOfs is the EEPROM offset where the station MAC address lives. */
static struct board {
	const char	*deviceLabel;	/* printable adapter name */
	u32		flags;		/* TLAN_ADAPTER_* capability bits */
	u16		addrOfs;	/* EEPROM offset of the MAC address */
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
	/* Entries 13 and 14 are used only by the EISA probe (NetFlex-3/E). */
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};
256
/* PCI match table.  driver_data is the index into board_info[] above,
 * so the two tables must stay in sync. */
static struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
287
/* ---- Probe / net_device entry points ---------------------------------- */
static void	TLan_EisaProbe( void );
static void	TLan_Eisa_Cleanup( void );
static int      TLan_Init( struct net_device * );
static int	TLan_Open( struct net_device *dev );
static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
static irqreturn_t TLan_HandleInterrupt( int, void *);
static int	TLan_Close( struct net_device *);
static struct	net_device_stats *TLan_GetStats( struct net_device *);
static void	TLan_SetMulticastList( struct net_device *);
static int	TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
static int      TLan_probe1( struct pci_dev *pdev, long ioaddr,
			     int irq, int rev, const struct pci_device_id *ent);
static void	TLan_tx_timeout( struct net_device *dev);
static void	TLan_tx_timeout_work(struct work_struct *work);
static int 	tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);

/* ---- Interrupt sub-handlers (dispatched via TLanIntVector[]) ---------- */
static u32	TLan_HandleTxEOF( struct net_device *, u16 );
static u32	TLan_HandleStatOverflow( struct net_device *, u16 );
static u32	TLan_HandleRxEOF( struct net_device *, u16 );
static u32	TLan_HandleDummy( struct net_device *, u16 );
static u32	TLan_HandleTxEOC( struct net_device *, u16 );
static u32	TLan_HandleStatusCheck( struct net_device *, u16 );
static u32	TLan_HandleRxEOC( struct net_device *, u16 );

/* ---- Timer ------------------------------------------------------------ */
static void	TLan_Timer( unsigned long );

/* ---- List management, DIO access, reset helpers ----------------------- */
static void	TLan_ResetLists( struct net_device * );
static void	TLan_FreeLists( struct net_device * );
static void	TLan_PrintDio( u16 );
static void	TLan_PrintList( TLanList *, char *, int );
static void	TLan_ReadAndClearStats( struct net_device *, int );
static void	TLan_ResetAdapter( struct net_device * );
static void	TLan_FinishReset( struct net_device * );
static void	TLan_SetMac( struct net_device *, int areg, char *mac );

/* ---- PHY handling ----------------------------------------------------- */
static void	TLan_PhyPrint( struct net_device * );
static void	TLan_PhyDetect( struct net_device * );
static void	TLan_PhyPowerDown( struct net_device * );
static void	TLan_PhyPowerUp( struct net_device * );
static void	TLan_PhyReset( struct net_device * );
static void	TLan_PhyStartLink( struct net_device * );
static void	TLan_PhyFinishAutoNeg( struct net_device * );
#ifdef MONITOR
static void     TLan_PhyMonitor( struct net_device * );
#endif







/* ---- MII management-interface bit-banging ----------------------------- */
static int	TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
static void	TLan_MiiSendData( u16, u32, unsigned );
static void	TLan_MiiSync( u16 );
static void	TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );

/* ---- EEPROM access ---------------------------------------------------- */
static void	TLan_EeSendStart( u16 );
static int	TLan_EeSendByte( u16, u8, int );
static void	TLan_EeReceiveByte( u16, u8 *, int );
static int	TLan_EeReadByte( struct net_device *, u8, u8 * );
350
351
352static inline void
353TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
354{
355 unsigned long addr = (unsigned long)skb;
356 tag->buffer[9].address = addr;
357 tag->buffer[8].address = upper_32_bits(addr);
358}
359
360static inline struct sk_buff *
361TLan_GetSKB( const struct tlan_list_tag *tag)
362{
363 unsigned long addr;
364
365 addr = tag->buffer[9].address;
366 addr |= (tag->buffer[8].address << 16) << 16;
367 return (struct sk_buff *) addr;
368}
369
370
/* Interrupt dispatch table, indexed by the interrupt-type field extracted
 * from HOST_INT in TLan_HandleInterrupt().  Slot 0 (no interrupt) is never
 * dispatched. */
static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
	NULL,
	TLan_HandleTxEOF,
	TLan_HandleStatOverflow,
	TLan_HandleRxEOF,
	TLan_HandleDummy,
	TLan_HandleTxEOC,
	TLan_HandleStatusCheck,
	TLan_HandleRxEOC
};
381
/*
 * TLan_SetTimer
 *
 * Arm the driver's single kernel timer to fire in `ticks` jiffies and tag
 * it with a TLAN_TIMER_* type.  If a timer other than an ACTIVITY timer is
 * already pending, the new request is dropped so an in-flight protocol
 * timer is not clobbered.
 *
 * priv->lock guards the function/type check.  Interrupt handlers already
 * hold the lock, hence the in_irq() guards around lock acquisition.
 */
static inline void
TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if ( priv->timer.function != NULL &&
	     priv->timerType != TLAN_TIMER_ACTIVITY ) {
		/* A higher-priority timer is pending; leave it alone. */
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = &TLan_Timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer.data = (unsigned long) dev;
	priv->timerSetAt = jiffies;
	priv->timerType = type;
	mod_timer(&priv->timer, jiffies + ticks);

}
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
/* PCI hot-unplug / module-unload callback: tear down the interface,
 * return the coherent DMA block holding the RX/TX lists, release the
 * I/O regions and free the net_device. */
static void __devexit tlan_remove_one( struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata( pdev );
	TLanPrivateInfo	*priv = netdev_priv(dev);

	unregister_netdev( dev );

	/* dmaStorage may be NULL if TLan_Init() failed before allocation. */
	if ( priv->dmaStorage ) {
		pci_free_consistent(priv->pciDev,
				    priv->dmaSize, priv->dmaStorage,
				    priv->dmaStorageDMA );
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	free_netdev( dev );

	pci_set_drvdata( pdev, NULL );
}
459
/* PCI driver glue; EISA devices are handled separately by TLan_EisaProbe(). */
static struct pci_driver tlan_driver = {
	.name		= "tlan",
	.id_table	= tlan_pci_tbl,
	.probe		= tlan_init_one,
	.remove		= __devexit_p(tlan_remove_one),
};
466
/*
 * tlan_probe
 *
 * Module entry point: register the PCI driver (its probe callback counts
 * devices via TLanDevicesInstalled), then scan the EISA bus.  Returns
 * -ENODEV if no adapter of either kind was found, 0 otherwise.
 */
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	printk(KERN_INFO "%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");



	rc = pci_register_driver(&tlan_driver);

	if (rc != 0) {
		printk(KERN_ERR "TLAN: Could not register pci driver.\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	TLan_EisaProbe();

	printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d  EISA: %d\n",
		 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
		 tlan_have_pci, tlan_have_eisa);

	/* Nothing found at all: undo the PCI registration and fail. */
	if (TLanDevicesInstalled == 0) {
		rc = -ENODEV;
		goto  err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}
502
503
/* PCI probe callback: delegate to the common TLan_probe1(); the sentinel
 * ioaddr/irq values mean "read them from PCI config space". */
static int __devinit tlan_init_one( struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	return TLan_probe1( pdev, -1, -1, 0, ent);
}
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530static int __devinit TLan_probe1(struct pci_dev *pdev,
531 long ioaddr, int irq, int rev,
532 const struct pci_device_id *ent )
533{
534
535 struct net_device *dev;
536 TLanPrivateInfo *priv;
537 u16 device_id;
538 int reg, rc = -ENODEV;
539
540#ifdef CONFIG_PCI
541 if (pdev) {
542 rc = pci_enable_device(pdev);
543 if (rc)
544 return rc;
545
546 rc = pci_request_regions(pdev, TLanSignature);
547 if (rc) {
548 printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
549 goto err_out;
550 }
551 }
552#endif
553
554 dev = alloc_etherdev(sizeof(TLanPrivateInfo));
555 if (dev == NULL) {
556 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
557 rc = -ENOMEM;
558 goto err_out_regions;
559 }
560 SET_NETDEV_DEV(dev, &pdev->dev);
561
562 priv = netdev_priv(dev);
563
564 priv->pciDev = pdev;
565 priv->dev = dev;
566
567
568 if (pdev) {
569 u32 pci_io_base = 0;
570
571 priv->adapter = &board_info[ent->driver_data];
572
573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
574 if (rc) {
575 printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
576 goto err_out_free_dev;
577 }
578
579 for ( reg= 0; reg <= 5; reg ++ ) {
580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
581 pci_io_base = pci_resource_start(pdev, reg);
582 TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
583 pci_io_base);
584 break;
585 }
586 }
587 if (!pci_io_base) {
588 printk(KERN_ERR "TLAN: No IO mappings available\n");
589 rc = -EIO;
590 goto err_out_free_dev;
591 }
592
593 dev->base_addr = pci_io_base;
594 dev->irq = pdev->irq;
595 priv->adapterRev = pdev->revision;
596 pci_set_master(pdev);
597 pci_set_drvdata(pdev, dev);
598
599 } else {
600
601
602 device_id = inw(ioaddr + EISA_ID2);
603 priv->is_eisa = 1;
604 if (device_id == 0x20F1) {
605 priv->adapter = &board_info[13];
606 priv->adapterRev = 23;
607 } else {
608 priv->adapter = &board_info[14];
609 priv->adapterRev = 10;
610 }
611 dev->base_addr = ioaddr;
612 dev->irq = irq;
613 }
614
615
616 if (dev->mem_start) {
617 priv->aui = dev->mem_start & 0x01;
618 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
619 : (dev->mem_start & 0x06) >> 1;
620 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
621 : (dev->mem_start & 0x18) >> 3;
622
623 if (priv->speed == 0x1) {
624 priv->speed = TLAN_SPEED_10;
625 } else if (priv->speed == 0x2) {
626 priv->speed = TLAN_SPEED_100;
627 }
628 debug = priv->debug = dev->mem_end;
629 } else {
630 priv->aui = aui[boards_found];
631 priv->speed = speed[boards_found];
632 priv->duplex = duplex[boards_found];
633 priv->debug = debug;
634 }
635
636
637
638 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
639
640 spin_lock_init(&priv->lock);
641
642 rc = TLan_Init(dev);
643 if (rc) {
644 printk(KERN_ERR "TLAN: Could not set up device.\n");
645 goto err_out_free_dev;
646 }
647
648 rc = register_netdev(dev);
649 if (rc) {
650 printk(KERN_ERR "TLAN: Could not register device.\n");
651 goto err_out_uninit;
652 }
653
654
655 TLanDevicesInstalled++;
656 boards_found++;
657
658
659 if (pdev)
660 tlan_have_pci++;
661 else {
662 priv->nextDevice = TLan_Eisa_Devices;
663 TLan_Eisa_Devices = dev;
664 tlan_have_eisa++;
665 }
666
667 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
668 dev->name,
669 (int) dev->irq,
670 (int) dev->base_addr,
671 priv->adapter->deviceLabel,
672 priv->adapterRev);
673 return 0;
674
675err_out_uninit:
676 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
677 priv->dmaStorageDMA );
678err_out_free_dev:
679 free_netdev(dev);
680err_out_regions:
681#ifdef CONFIG_PCI
682 if (pdev)
683 pci_release_regions(pdev);
684#endif
685err_out:
686 if (pdev)
687 pci_disable_device(pdev);
688 return rc;
689}
690
691
/* Walk the TLan_Eisa_Devices chain built by TLan_probe1(), tearing down
 * each EISA adapter: free its DMA block, release its 16-byte I/O region,
 * unregister and free the net_device. */
static void TLan_Eisa_Cleanup(void)
{
	struct net_device *dev;
	TLanPrivateInfo *priv;

	while( tlan_have_eisa ) {
		dev = TLan_Eisa_Devices;
		priv = netdev_priv(dev);
		if (priv->dmaStorage) {
			pci_free_consistent(priv->pciDev, priv->dmaSize,
					    priv->dmaStorage, priv->dmaStorageDMA );
		}
		release_region( dev->base_addr, 0x10);
		unregister_netdev( dev );
		/* Advance the list head before freeing the node. */
		TLan_Eisa_Devices = priv->nextDevice;
		free_netdev( dev );
		tlan_have_eisa--;
	}
}
711
712
/* Module exit: unregister the PCI driver (the core invokes
 * tlan_remove_one for each bound device), then clean up any EISA
 * adapters we registered ourselves. */
static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		TLan_Eisa_Cleanup();

}


/* Module entry/exit points. */
module_init(tlan_probe);
module_exit(tlan_exit);
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742static void __init TLan_EisaProbe (void)
743{
744 long ioaddr;
745 int rc = -ENODEV;
746 int irq;
747 u16 device_id;
748
749 if (!EISA_bus) {
750 TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
751 return;
752 }
753
754
755 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
756
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
759 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
760 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
761
762
763 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
764 (int) ioaddr);
765 if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
766 goto out;
767
768 if (inw(ioaddr + EISA_ID) != 0x110E) {
769 release_region(ioaddr, 0x10);
770 goto out;
771 }
772
773 device_id = inw(ioaddr + EISA_ID2);
774 if (device_id != 0x20F1 && device_id != 0x40F1) {
775 release_region (ioaddr, 0x10);
776 goto out;
777 }
778
779 if (inb(ioaddr + EISA_CR) != 0x1) {
780 release_region (ioaddr, 0x10);
781 goto out2;
782 }
783
784 if (debug == 0x10)
785 printk("Found one\n");
786
787
788
789 switch (inb(ioaddr + 0xCC0)) {
790 case(0x10):
791 irq=5;
792 break;
793 case(0x20):
794 irq=9;
795 break;
796 case(0x40):
797 irq=10;
798 break;
799 case(0x80):
800 irq=11;
801 break;
802 default:
803 goto out;
804 }
805
806
807
808 rc = TLan_probe1( NULL, ioaddr, irq,
809 12, NULL);
810 continue;
811
812 out:
813 if (debug == 0x10)
814 printk("None found\n");
815 continue;
816
817 out2: if (debug == 0x10)
818 printk("Card found but it is not enabled, skipping\n");
819 continue;
820
821 }
822
823}
824
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the device IRQ masked so
 * netconsole/kgdb can drive the NIC without real interrupts. */
static void TLan_Poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	TLan_HandleInterrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
833
/* net_device operations; generic ethernet helpers are used for MTU,
 * MAC-address and address validation. */
static const struct net_device_ops TLan_netdev_ops = {
	.ndo_open		= TLan_Open,
	.ndo_stop		= TLan_Close,
	.ndo_start_xmit		= TLan_StartTx,
	.ndo_tx_timeout		= TLan_tx_timeout,
	.ndo_get_stats		= TLan_GetStats,
	.ndo_set_multicast_list = TLan_SetMulticastList,
	.ndo_do_ioctl		= TLan_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = TLan_Poll,
#endif
};
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
/*
 * TLan_Init
 *
 * Software-side initialization done once per device at probe time:
 * allocate one coherent DMA block holding all RX and TX list entries,
 * read the station MAC address out of the EEPROM, and install the
 * net_device_ops.  Returns 0 or -ENOMEM.
 */
static int TLan_Init( struct net_device *dev )
{
	int		dma_size;
	int 		err;
	int		i;
	TLanPrivateInfo	*priv;

	priv = netdev_priv(dev);

	dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
		* ( sizeof(TLanList) );
	priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
						dma_size, &priv->dmaStorageDMA);
	priv->dmaSize = dma_size;

	if ( priv->dmaStorage == NULL ) {
		printk(KERN_ERR "TLAN:  Could not allocate lists and buffers for %s.\n",
			dev->name );
		return -ENOMEM;
	}
	memset( priv->dmaStorage, 0, dma_size );
	/* The chip requires list entries aligned to 8 bytes; align both the
	 * CPU and bus addresses identically so the offsets stay in sync. */
	priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
	priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
	priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
	priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;

	/* Read the 6-byte MAC address from the EEPROM; errors are OR-ed
	 * together and only reported, not treated as fatal. */
	err = 0;
	for ( i = 0;  i < 6 ; i++ )
		err |= TLan_EeReadByte( dev,
					(u8) priv->adapter->addrOfs + i,
					(u8 *) &dev->dev_addr[i] );
	if ( err ) {
		printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
			dev->name,
			err );
	}
	dev->addr_len = 6;

	netif_carrier_off(dev);

	/* Device methods */
	dev->netdev_ops = &TLan_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;

}
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
/*
 * TLan_Open
 *
 * ndo_open: grab the (shared) IRQ, reset the RX/TX lists and the chip,
 * and start the transmit queue.  Returns 0 or the request_irq() error.
 */
static int TLan_Open( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	int		err;

	priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
	err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
			   dev->name, dev );

	if ( err ) {
		pr_err("TLAN:  Cannot open %s because IRQ %d is already in use.\n",
		       dev->name, dev->irq );
		return err;
	}

	init_timer(&priv->timer);
	netif_start_queue(dev);

	/* NOTE: the chip is enabled via TLan_ResetAdapter ->
	 * TLan_FinishReset, which also kicks off PHY setup. */
	TLan_ResetLists( dev );
	TLan_ReadAndClearStats( dev, TLAN_IGNORE );
	TLan_ResetAdapter( dev );

	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
		  dev->name, priv->tlanRev );

	return 0;

}
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
/*
 * TLan_ioctl
 *
 * ndo_do_ioctl: standard MII ioctls only.  SIOCGMIIPHY reports the
 * currently-selected PHY id and then falls through to also return the
 * requested register, per the usual MII ioctl convention.
 * Returns -EAGAIN until the PHY is up, -EOPNOTSUPP for unknown cmds.
 */
static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy   = priv->phy[priv->phyNum];

	if (!priv->phyOnline)
		return -EAGAIN;

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		TLan_MiiReadReg(dev, data->phy_id & 0x1f,
				data->reg_num & 0x1f, &data->val_out);
		return 0;


	case SIOCSMIIREG:		/* Write MII PHY register. */
		TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
				 data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
/*
 * TLan_tx_timeout
 *
 * ndo_tx_timeout: full recovery — drop everything queued, rebuild the
 * RX/TX lists, reset the adapter, and restart the transmit queue.
 */
static void TLan_tx_timeout(struct net_device *dev)
{

	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	/* Ok so we timed out, lets see what we can do about it... */
	TLan_FreeLists( dev );
	TLan_ResetLists( dev );
	TLan_ReadAndClearStats( dev, TLAN_IGNORE );
	TLan_ResetAdapter( dev );
	dev->trans_start = jiffies;
	netif_wake_queue( dev );

}
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053static void TLan_tx_timeout_work(struct work_struct *work)
1054{
1055 TLanPrivateInfo *priv =
1056 container_of(work, TLanPrivateInfo, tlan_tqueue);
1057
1058 TLan_tx_timeout(priv->dev);
1059}
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
/*
 * TLan_StartTx
 *
 * ndo_start_xmit: place the skb on the next free TX list entry and either
 * start the TX channel (if idle) or chain the entry onto the previous
 * one's forward pointer so the running channel picks it up.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	dma_addr_t	tail_list_phys;
	TLanList	*tail_list;
	unsigned long	flags;
	unsigned int    txlen;

	if ( ! priv->phyOnline ) {
		/* No link yet: silently drop. */
		TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
			  dev->name );
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Hardware requires at least a minimum-size ethernet frame. */
	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->txList + priv->txTail;
	tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;

	if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
		/* Ring full; stop the queue until TxEOF frees an entry. */
		TLAN_DBG( TLAN_DEBUG_TX,
			  "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
			  dev->name, priv->txHead, priv->txTail );
		netif_stop_queue(dev);
		priv->txBusyCount++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	/* Map the frame for DMA and remember the skb for TxEOF unmapping. */
	tail_list->buffer[0].address = pci_map_single(priv->pciDev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	TLan_StoreSKB(tail_list, skb);

	tail_list->frameSize = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	/* Flip cStat last so the chip never sees a half-built entry. */
	tail_list->cStat = TLAN_CSTAT_READY;
	if ( ! priv->txInProgress ) {
		priv->txInProgress = 1;
		TLAN_DBG( TLAN_DEBUG_TX,
			  "TRANSMIT:  Starting TX on buffer %d\n", priv->txTail );
		outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
		outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
	} else {
		/* Channel already running: chain this entry after the
		 * previous one (wrapping at ring end). */
		TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Adding buffer %d to TX channel\n",
			  priv->txTail );
		if ( priv->txTail == 0 ) {
			( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
				= tail_list_phys;
		} else {
			( priv->txList + ( priv->txTail - 1 ) )->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
/*
 * TLan_HandleInterrupt
 *
 * Shared IRQ handler: read HOST_INT, extract the interrupt type, and
 * dispatch to the matching TLanIntVector[] sub-handler under priv->lock.
 * A non-zero return from the sub-handler is written back as an ACK count
 * via the HOST_CMD register.  Returns IRQ_HANDLED iff a type was pending.
 */
static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
{
	struct net_device	*dev = dev_id;
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16		host_int;
	u16		type;

	spin_lock(&priv->lock);

	host_int = inw( dev->base_addr + TLAN_HOST_INT );
	type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
	if ( type ) {
		u32	ack;
		u32	host_cmd;

		/* Acknowledge/latch the interrupt before handling it. */
		outw( host_int, dev->base_addr + TLAN_HOST_INT );
		ack = TLanIntVector[type]( dev, host_int );

		if ( ack ) {
			host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
			outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/*
 * TLan_Close
 *
 * ndo_stop: stop the queue, record final statistics, reset the adapter,
 * cancel any pending driver timer, release the IRQ and free the RX/TX
 * buffers.  Always returns 0.
 */
static int TLan_Close(struct net_device *dev)
{
	TLanPrivateInfo *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	priv->neg_be_verbose = 0;

	TLan_ReadAndClearStats( dev, TLAN_RECORD );
	/* Hard-reset the chip so it stops all DMA before we free buffers. */
	outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
	if ( priv->timer.function != NULL ) {
		del_timer_sync( &priv->timer );
		priv->timer.function = NULL;
	}

	free_irq( dev->irq, dev );
	TLan_FreeLists( dev );
	TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );

	return 0;

}
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
/*
 * TLan_GetStats
 *
 * ndo_get_stats: fold the chip's hardware statistics registers into
 * dev->stats and return it.  With the GNRL/LIST debug bits set, also
 * dumps DIO registers, PHY registers and the RX/TX list entries.
 */
static struct net_device_stats *TLan_GetStats( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	int i;

	/* Should only read stats if open? */
	TLan_ReadAndClearStats( dev, TLAN_RECORD );

	TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
		  priv->rxEocCount );
	TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
		  priv->txBusyCount );
	if ( debug & TLAN_DEBUG_GNRL ) {
		TLan_PrintDio( dev->base_addr );
		TLan_PhyPrint( dev );
	}
	if ( debug & TLAN_DEBUG_LIST ) {
		for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
			TLan_PrintList( priv->rxList + i, "RX", i );
		for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
			TLan_PrintList( priv->txList + i, "TX", i );
	}

	return &dev->stats;

}
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
/*
 * TLan_SetMulticastList
 *
 * ndo_set_multicast_list: program the RX filter.  IFF_PROMISC sets the
 * chip's copy-all-frames bit; IFF_ALLMULTI opens both hash registers.
 * Otherwise the first 3 multicast addresses go into the extra address
 * registers (areg 1-3) and the rest are folded into the two 32-bit
 * hash registers via TLan_HashFunc().
 */
static void TLan_SetMulticastList( struct net_device *dev )
{
	struct dev_mc_list	*dmi = dev->mc_list;
	u32			hash1 = 0;
	u32			hash2 = 0;
	int			i;
	u32			offset;
	u8			tmp;

	if ( dev->flags & IFF_PROMISC ) {
		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
		TLan_DioWrite8( dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
	} else {
		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
		TLan_DioWrite8( dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
		if ( dev->flags & IFF_ALLMULTI ) {
			/* Clear the extra address regs, accept everything
			 * via the hash. */
			for ( i = 0; i < 3; i++ )
				TLan_SetMac( dev, i + 1, NULL );
			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
		} else {
			for ( i = 0; i < dev->mc_count; i++ ) {
				if ( i < 3 ) {
					/* Exact-match slots 1-3. */
					TLan_SetMac( dev, i + 1,
						     (char *) &dmi->dmi_addr );
				} else {
					/* Overflow into the 64-bit hash. */
					offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
					if ( offset < 32 )
						hash1 |= ( 1 << offset );
					else
						hash2 |= ( 1 << ( offset - 32 ) );
				}
				dmi = dmi->next;
			}
			/* Clear any unused exact-match slots. */
			for ( ; i < 3; i++ )
				TLan_SetMac( dev, i + 1, NULL );
			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
		}
	}

}
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
/*
 * TLan_HandleTxEOF
 *
 * Interrupt sub-handler for Tx End-Of-Frame: walk completed entries from
 * txHead, unmap and free each skb and return the entry to the ring.
 * If an EOC was piggy-backed on a completed frame and more entries are
 * ready, restart the TX channel (TLAN_HC_GO folded into the ack value).
 * Also blinks the activity LED on adapters that have one.
 * Returns the number of frames handled, to be ACKed to the chip.
 */
static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	int		eoc = 0;
	TLanList	*head_list;
	dma_addr_t	head_list_phys;
	u32		ack = 0;
	u16		tmpCStat;

	TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
		  priv->txHead, priv->txTail );
	head_list = priv->txList + priv->txHead;

	/* ack is capped at 255 because it is ACKed via an 8-bit field. */
	while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
		struct sk_buff *skb = TLan_GetSKB(head_list);

		ack++;
		pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if ( tmpCStat & TLAN_CSTAT_EOC )
			eoc = 1;

		dev->stats.tx_bytes += head_list->frameSize;

		head_list->cStat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
		head_list = priv->txList + priv->txHead;
	}

	if (!ack)
		printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");

	if ( eoc ) {
		TLAN_DBG( TLAN_DEBUG_TX,
			  "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d)\n",
			  priv->txHead, priv->txTail );
		head_list = priv->txList + priv->txHead;
		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
			/* More frames queued: restart the channel. */
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
			ack |= TLAN_HC_GO;
		} else {
			priv->txInProgress = 0;
		}
	}

	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
		TLan_DioWrite8( dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
		if ( priv->timer.function == NULL ) {
			/* Timer turns the ACT LED back off after a delay. */
			priv->timer.function = &TLan_Timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timerSetAt = jiffies;
			priv->timerType = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
			priv->timerSetAt = jiffies;
		}
	}

	return ack;

}
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
/* Interrupt sub-handler for a statistics-register overflow: fold the
 * hardware counters into dev->stats (which also clears them), then ACK. */
static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
{
	TLan_ReadAndClearStats( dev, TLAN_RECORD );

	return 1;

}
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
/*
 * TLan_HandleRxEOF
 *
 * Interrupt sub-handler for Rx End-Of-Frame: for each completed entry
 * from rxHead, hand the filled skb to the stack and replace it with a
 * freshly allocated one; if allocation fails, the old buffer is reused
 * and the frame is dropped.  Each processed entry is re-chained onto the
 * tail of the RX ring.  On EOC, the channel is restarted at the current
 * head (TLAN_HC_GO | TLAN_HC_RT folded into the ack).  Blinks the
 * activity LED where present.  Returns the number of frames handled.
 */
static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u32		ack = 0;
	int		eoc = 0;
	TLanList	*head_list;
	struct sk_buff	*skb;
	TLanList	*tail_list;
	u16		tmpCStat;
	dma_addr_t	head_list_phys;

	TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  Handling RX EOF (Head=%d Tail=%d)\n",
		  priv->rxHead, priv->rxTail );
	head_list = priv->rxList + priv->rxHead;
	head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;

	/* ack is capped at 255 because it is ACKed via an 8-bit field. */
	while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
		dma_addr_t frameDma = head_list->buffer[0].address;
		u32 frameSize = head_list->frameSize;
		struct sk_buff *new_skb;

		ack++;
		if (tmpCStat & TLAN_CSTAT_EOC)
			eoc = 1;

		/* +7 leaves room for the NET_IP_ALIGN reserve below. */
		new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
		if ( !new_skb )
			goto drop_and_reuse;

		skb = TLan_GetSKB(head_list);
		pci_unmap_single(priv->pciDev, frameDma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put( skb, frameSize );

		dev->stats.rx_bytes += frameSize;

		skb->protocol = eth_type_trans( skb, dev );
		netif_rx( skb );

		/* Arm the entry with the replacement buffer. */
		skb_reserve( new_skb, NET_IP_ALIGN );
		head_list->buffer[0].address = pci_map_single(priv->pciDev,
							      new_skb->data,
							      TLAN_MAX_FRAME_SIZE,
							      PCI_DMA_FROMDEVICE);

		TLan_StoreSKB(head_list, new_skb);
drop_and_reuse:
		/* Re-chain the entry onto the ring tail and advance. */
		head_list->forward = 0;
		head_list->cStat = 0;
		tail_list = priv->rxList + priv->rxTail;
		tail_list->forward = head_list_phys;

		CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
		CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
		head_list = priv->rxList + priv->rxHead;
		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
	}

	if (!ack)
		printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");


	if ( eoc ) {
		TLAN_DBG( TLAN_DEBUG_RX,
			  "RECEIVE:  Handling RX EOC (Head=%d Tail=%d)\n",
			  priv->rxHead, priv->rxTail );
		head_list = priv->rxList + priv->rxHead;
		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rxEocCount++;
	}

	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
		TLan_DioWrite8( dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
		if ( priv->timer.function == NULL )  {
			/* Timer turns the ACT LED back off after a delay. */
			priv->timer.function = &TLan_Timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timerSetAt = jiffies;
			priv->timerType = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
			priv->timerSetAt = jiffies;
		}
	}

	return ack;

}
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
/* Handle the adapter's self-test ("dummy") interrupt, requested via
 * TLAN_HC_REQ_INT during reset when debugging is enabled; just log it.
 * Returns 1 to acknowledge.  host_int is unused.
 */
static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
{
	printk( "TLAN: Test interrupt on %s.\n", dev->name );
	return 1;

}
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
/* Handle a TX end-of-channel interrupt.  Only pre-0x30 adapter revisions
 * raise this: if another list is already READY, restart the TX channel at
 * the head; otherwise mark transmission idle.  Newer revisions have TX EOC
 * disabled at reset, so this is a no-op for them.
 * Returns the ack value, possibly OR'd with TLAN_HC_GO to restart TX.
 */
static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	TLanList		*head_list;
	dma_addr_t		head_list_phys;
	u32			ack = 1;

	host_int = 0;		/* parameter deliberately unused */
	if ( priv->tlanRev < 0x30 ) {
		TLAN_DBG( TLAN_DEBUG_TX,
			  "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			  priv->txHead, priv->txTail );
		head_list = priv->txList + priv->txHead;
		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
			/* More work queued: stop feeding and kick the channel. */
			netif_stop_queue(dev);
			outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
			ack |= TLAN_HC_GO;
		} else {
			priv->txInProgress = 0;
		}
	}

	return ack;

}
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
/* Handle a status-check interrupt.  Two cases:
 *  - an adapter error (TLAN_HI_IV_MASK set): log the error code, record
 *    stats, reset the adapter, and schedule the deferred reset work;
 *    returns 0 so the channel is not acked/restarted here.
 *  - a network status change: clear the latched status bits and, for the
 *    internal PHY (phyNum 0), auto-correct cable polarity by toggling
 *    TLAN_TC_SWAPOL to match the reported polarity-OK status.
 */
static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u32		ack;
	u32		error;
	u8		net_sts;
	u32		phy;
	u16		tlphy_ctl;
	u16		tlphy_sts;

	ack = 1;
	if ( host_int & TLAN_HI_IV_MASK ) {
		netif_stop_queue( dev );
		error = inl( dev->base_addr + TLAN_CH_PARM );
		printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
		TLan_ReadAndClearStats( dev, TLAN_RECORD );
		outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );

		/* Full re-init happens in process context via the workqueue. */
		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
		phy = priv->phy[priv->phyNum];

		/* Reading then writing TLAN_NET_STS back clears the latched bits. */
		net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
		if ( net_sts ) {
			TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
			TLAN_DBG( TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
				  dev->name, (unsigned) net_sts );
		}
		if ( ( net_sts & TLAN_NET_STS_MIRQ ) &&  ( priv->phyNum == 0 ) ) {
			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
			/* Swap polarity iff the PHY reports it wrong and we have
			 * not already swapped it (and vice versa). */
        		if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
			     ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
                		tlphy_ctl |= TLAN_TC_SWAPOL;
                		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
        		} else if ( ( tlphy_sts & TLAN_TS_POLOK )
				    && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
                		tlphy_ctl &= ~TLAN_TC_SWAPOL;
                		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
        		}

			if (debug) {
				TLan_PhyPrint( dev );
			}
		}
	}

	return ack;

}
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
/* Handle an RX end-of-channel interrupt.  Only pre-0x30 revisions raise
 * this (newer ones have RX EOC disabled at reset): restart the RX channel
 * at the current ring head by writing its physical address to CH_PARM and
 * OR-ing GO|RT into the ack written back to the host command register.
 */
static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	dma_addr_t	head_list_phys;
	u32		ack = 1;

	if (  priv->tlanRev < 0x30 ) {
		TLAN_DBG( TLAN_DEBUG_RX,
			  "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
			  priv->rxHead, priv->rxTail );
		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
		outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rxEocCount++;
	}

	return ack;

}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
/* Driver timer callback: dispatches on priv->timerType to the matching
 * PHY/reset state-machine step.  timer.function is cleared on entry as the
 * "no timer pending" flag used throughout the driver.
 *
 * The ACTIVITY case turns the activity LED off, but only once
 * TLAN_TIMER_ACT_DELAY has passed since the interrupt path last refreshed
 * timerSetAt; otherwise it re-arms itself for the remainder.  That test and
 * re-arm are done under priv->lock to avoid racing the interrupt handler,
 * and the lock is dropped before add_timer().
 */
static void TLan_Timer( unsigned long data )
{
	struct net_device	*dev = (struct net_device *) data;
	TLanPrivateInfo *priv = netdev_priv(dev);
	u32		elapsed;
	unsigned long	flags = 0;

	priv->timer.function = NULL;

	switch ( priv->timerType ) {
#ifdef MONITOR
		case TLAN_TIMER_LINK_BEAT:
			TLan_PhyMonitor( dev );
			break;
#endif
		case TLAN_TIMER_PHY_PDOWN:
			TLan_PhyPowerDown( dev );
			break;
		case TLAN_TIMER_PHY_PUP:
			TLan_PhyPowerUp( dev );
			break;
		case TLAN_TIMER_PHY_RESET:
			TLan_PhyReset( dev );
			break;
		case TLAN_TIMER_PHY_START_LINK:
			TLan_PhyStartLink( dev );
			break;
		case TLAN_TIMER_PHY_FINISH_AN:
			TLan_PhyFinishAutoNeg( dev );
			break;
		case TLAN_TIMER_FINISH_RESET:
			TLan_FinishReset( dev );
			break;
		case TLAN_TIMER_ACTIVITY:
			spin_lock_irqsave(&priv->lock, flags);
			/* If the IRQ path re-armed us meanwhile, function is
			 * non-NULL and we must not touch the LED. */
			if ( priv->timer.function == NULL ) {
				elapsed = jiffies - priv->timerSetAt;
				if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
					TLan_DioWrite8( dev->base_addr,
							TLAN_LED_REG, TLAN_LED_LINK );
				} else  {
					/* Activity was refreshed recently:
					 * sleep until the refreshed deadline. */
					priv->timer.function = &TLan_Timer;
					priv->timer.expires = priv->timerSetAt
						+ TLAN_TIMER_ACT_DELAY;
					spin_unlock_irqrestore(&priv->lock, flags);
					add_timer( &priv->timer );
					break;
				}
			}
			spin_unlock_irqrestore(&priv->lock, flags);
			break;
		default:
			break;
	}

}
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
/* (Re)initialize the TX and RX DMA list rings.
 * TX lists are all marked UNUSED with cleared buffer slots.  Each RX list
 * is marked READY with a newly allocated, DMA-mapped receive skb, and the
 * lists are chained via their forward pointers; the chain of the last list
 * is terminated with 0.  rxTail starts at the last list so the ring wraps
 * correctly on the first completion.
 *
 * NOTE(review): if an skb allocation fails mid-loop, the remaining lists
 * only get their skb slot cleared -- the failed list keeps
 * cStat = TLAN_CSTAT_READY with no mapped buffer, and list->forward at the
 * end is written to that failed entry.  Looks fragile under OOM; verify
 * against how the caller handles partial rings.
 */
static void TLan_ResetLists( struct net_device *dev )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	int		i;
	TLanList	*list;
	dma_addr_t	list_phys;
	struct sk_buff	*skb;

	priv->txHead = 0;
	priv->txTail = 0;
	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
		list = priv->txList + i;
		list->cStat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rxHead = 0;
	priv->rxTail = TLAN_NUM_RX_LISTS - 1;
	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
		list = priv->rxList + i;
		list_phys = priv->rxListDMA + sizeof(TLanList) * i;
		list->cStat = TLAN_CSTAT_READY;
		list->frameSize = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
		if ( !skb ) {
			pr_err("TLAN: out of memory for received data.\n" );
			break;
		}

		skb_reserve( skb, NET_IP_ALIGN );
		list->buffer[0].address = pci_map_single(priv->pciDev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		TLan_StoreSKB(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(TLanList);
	}

	/* in case ran out of memory early, clear the skb slots of the rest */
	while (i < TLAN_NUM_RX_LISTS) {
		TLan_StoreSKB(priv->rxList + i, NULL);
		++i;
	}
	list->forward = 0;		/* terminate the forward chain */

}
1995
1996
/* Release every skb still attached to the TX and RX rings, unmapping its
 * DMA buffer first.  The unmap sizes mirror the mapping sites: TX buffers
 * were mapped with max(skb->len, TLAN_MIN_FRAME_SIZE), RX buffers always
 * with TLAN_MAX_FRAME_SIZE.  Safe to call with a partially populated RX
 * ring (NULL skb slots are skipped).
 */
static void TLan_FreeLists( struct net_device *dev )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	int		i;
	TLanList	*list;
	struct sk_buff	*skb;

	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
		list = priv->txList + i;
		skb = TLan_GetSKB(list);
		if ( skb ) {
			pci_unmap_single(
				priv->pciDev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any( skb );
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
		list = priv->rxList + i;
		skb = TLan_GetSKB(list);
		if ( skb ) {
			pci_unmap_single(priv->pciDev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any( skb );
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052static void TLan_PrintDio( u16 io_base )
2053{
2054 u32 data0, data1;
2055 int i;
2056
2057 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
2058 io_base );
2059 printk( "TLAN: Off. +0 +4\n" );
2060 for ( i = 0; i < 0x4C; i+= 8 ) {
2061 data0 = TLan_DioRead32( io_base, i );
2062 data1 = TLan_DioRead32( io_base, i + 0x4 );
2063 printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
2064 }
2065
2066}
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088static void TLan_PrintList( TLanList *list, char *type, int num)
2089{
2090 int i;
2091
2092 printk( "TLAN: %s List %d at %p\n", type, num, list );
2093 printk( "TLAN: Forward = 0x%08x\n", list->forward );
2094 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
2095 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
2096
2097 for ( i = 0; i < 2; i++ ) {
2098 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2099 i, list->buffer[i].count, list->buffer[i].address );
2100 }
2101
2102}
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/* Read the adapter's on-chip statistics registers and, when `record` is
 * non-zero (TLAN_RECORD), accumulate them into dev->stats.
 * Each outw to TLAN_DIO_ADR selects a 32-bit statistics register, whose
 * four bytes are then read individually from TLAN_DIO_DATA[+0..+3]; the
 * packed fields are reassembled by shifting.  Per the function's contract,
 * reading clears the hardware counters, so they are always read even when
 * the results are discarded.
 */
static void TLan_ReadAndClearStats( struct net_device *dev, int record )
{
	u32		tx_good, tx_under;
	u32		rx_good, rx_over;
	u32		def_tx, crc, code;
	u32		multi_col, single_col;
	u32		excess_col, late_col, loss;

	/* good TX frames (24 bits) + TX underruns (8 bits) */
	outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
	tx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
	tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );

	/* good RX frames (24 bits) + RX overruns (8 bits) */
	outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
	rx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
	rx_over  = inb( dev->base_addr + TLAN_DIO_DATA + 3 );

	/* deferred TX (16 bits) + CRC errors (8 bits) + code errors (8 bits) */
	outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
	def_tx  = inb( dev->base_addr + TLAN_DIO_DATA );
	def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
	crc     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
	code    = inb( dev->base_addr + TLAN_DIO_DATA + 3 );

	/* multiple collisions (16 bits) + single collisions (16 bits) */
	outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
	multi_col   = inb( dev->base_addr + TLAN_DIO_DATA );
	multi_col  += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
	single_col  = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
	single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;

	/* excess collisions + late collisions + carrier loss (8 bits each) */
	outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
	excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
	late_col   = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
	loss       = inb( dev->base_addr + TLAN_DIO_DATA + 2 );

	if ( record ) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors  += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors  += tx_under + loss;
		dev->stats.collisions += multi_col + single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors  += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}

}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
/* Put the adapter into a known post-reset state: issue an adapter reset,
 * disable interrupts, clear the address/hash registers, select basic NetConfig
 * options, program the interrupt hold-off timer and TX commit threshold,
 * release the MII from reset, and detect PHYs.  Depending on the adapter
 * flags this either finishes the reset immediately (unmanaged PHY) or kicks
 * off the timer-driven PHY power-down/up/reset sequence.
 */
static void
TLan_ResetAdapter( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	int		i;
	u32		addr;
	u32		data;
	u8		data8;

	priv->tlanFullDuplex = FALSE;
	priv->phyOnline=0;
	netif_carrier_off(dev);

/*  1.	Assert reset bit. */

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

/*  2.	Turn off interrupts. ( Probably isn't necessary ) */

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

/*  3.	Clear AREGs and HASHs. */

 	for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
		TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
	}

/*  4.	Setup NetConfig register. */

	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );

/*  5.	Load Ld_Tmr and Ld_Thr in HOST_CMD. */

	outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
	outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );

/*  6.	Unreset the MII by setting NMRST (in NetSio) to 1. */

	outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	TLan_SetBit( TLAN_NET_SIO_NMRST, addr );

/*  7.	Setup the remaining registers. */

	if ( priv->tlanRev >= 0x30 ) {
		/* newer silicon: mask TX/RX EOC interrupts entirely */
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
	}
	TLan_PhyDetect( dev );
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
		data |= TLAN_NET_CFG_BIT;
		if ( priv->aui == 1 ) {
			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
		} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
			priv->tlanFullDuplex = TRUE;
		} else {
			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
		}
	}

	if ( priv->phyNum == 0 ) {
		data |= TLAN_NET_CFG_PHY_EN;
	}
	TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );

	if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
		TLan_FinishReset( dev );
	} else {
		TLan_PhyPowerDown( dev );
	}

}
2281
2282
2283
2284
/* Final stage of the reset sequence: program the network command/mask
 * registers, check (or force) link, report the negotiated mode, enable PHY
 * interrupts for the internal PHY, and -- if link is up -- set the MAC
 * address, enable interrupts and start the RX channel.  If the link is
 * down, reschedules itself via the driver timer every 10 seconds.
 */
static void
TLan_FinishReset( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u8		data;
	u32		phy;
	u8		sio;
	u16		status;
	u16		partner;
	u16		tlphy_ctl;
	u16 		tlphy_par;
	u16		tlphy_id1, tlphy_id2;
	int 		i;

	phy = priv->phy[priv->phyNum];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if ( priv->tlanFullDuplex ) {
		data |= TLAN_NET_CMD_DUPLEX;
	}
	TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if ( priv->phyNum == 0 ) {
		data |= TLAN_NET_MASK_MASK7;
	}
	TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
	/* max RX size rounded up to a multiple of 8 */
	TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
	TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
	TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );

	if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
	     ( priv->aui ) ) {
		/* no MII status available: pretend the link is up */
		status = MII_GS_LINK;
		printk( "TLAN: %s: Link forced.\n", dev->name );
	} else {
		/* read twice: the link bit is latched-low and needs a second
		 * read to reflect the current state */
		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
		udelay( 1000 );
		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
		if ( (status & MII_GS_LINK) &&
		     /* We only support link info on Nat.Sem. PHY's */
		     (tlphy_id1 == NAT_SEM_ID1) &&
		     (tlphy_id2 == NAT_SEM_ID2) ) {
			TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );

			printk( "TLAN: %s: Link active with ", dev->name );
			if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
			      	printk( "forced 10%sMbps %s-Duplex\n",
					tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
					tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
			} else {
				printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
					tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
					tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
				printk("TLAN: Partner capability: ");
				/* LPA bits 5..10 map to entries of media[] */
					for (i = 5; i <= 10; i++)
						if (partner & (1<<i))
							printk("%s",media[i-5]);
				printk("\n");
			}

			TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
#ifdef MONITOR
			/* We have link beat..for now anyway */
	        	priv->link = 1;
	        	/*Enabling link beat monitoring */
			TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
#endif
		} else if (status & MII_GS_LINK)  {
			printk( "TLAN: %s: Link active\n", dev->name );
			TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
		}
	}

	if ( priv->phyNum == 0 ) {
		/* internal PHY: enable its interrupt and the MII interrupt pin */
        	TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
        	tlphy_ctl |= TLAN_TC_INTEN;
        	TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
        	sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
        	sio |= TLAN_NET_SIO_MINTEN;
        	TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
	}

	if ( status & MII_GS_LINK ) {
		TLan_SetMac( dev, 0, dev->dev_addr );
		priv->phyOnline = 1;
		outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
		if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
			/* request a test interrupt to verify IRQ delivery */
			outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
		}
		outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
		outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
		netif_carrier_on(dev);
	} else {
		printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
			dev->name );
		TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
		return;
	}
	TLan_SetMulticastList(dev);

}
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2413{
2414 int i;
2415
2416 areg *= 6;
2417
2418 if ( mac != NULL ) {
2419 for ( i = 0; i < 6; i++ )
2420 TLan_DioWrite8( dev->base_addr,
2421 TLAN_AREG_0 + areg + i, mac[i] );
2422 } else {
2423 for ( i = 0; i < 6; i++ )
2424 TLan_DioWrite8( dev->base_addr,
2425 TLAN_AREG_0 + areg + i, 0 );
2426 }
2427
2428}
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456static void TLan_PhyPrint( struct net_device *dev )
2457{
2458 TLanPrivateInfo *priv = netdev_priv(dev);
2459 u16 i, data0, data1, data2, data3, phy;
2460
2461 phy = priv->phy[priv->phyNum];
2462
2463 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
2464 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
2465 } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
2466 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
2467 printk( "TLAN: Off. +0 +1 +2 +3 \n" );
2468 for ( i = 0; i < 0x20; i+= 4 ) {
2469 printk( "TLAN: 0x%02x", i );
2470 TLan_MiiReadReg( dev, phy, i, &data0 );
2471 printk( " 0x%04hx", data0 );
2472 TLan_MiiReadReg( dev, phy, i + 1, &data1 );
2473 printk( " 0x%04hx", data1 );
2474 TLan_MiiReadReg( dev, phy, i + 2, &data2 );
2475 printk( " 0x%04hx", data2 );
2476 TLan_MiiReadReg( dev, phy, i + 3, &data3 );
2477 printk( " 0x%04hx\n", data3 );
2478 }
2479 } else {
2480 printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
2481 }
2482
2483}
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
/* Probe for PHYs.  phy[0] is reserved for the internal PHY (fixed at
 * address TLAN_PHY_MAX_ADDR, recorded only if it answers); phy[1] is the
 * first responding external PHY at any lower address.  An external PHY is
 * preferred (phyNum = 1) over the internal one (phyNum = 0).
 *
 * NOTE(review): if no PHY responds at all, only a message is printed and
 * priv->phyNum is left at whatever it was before -- verify callers cope
 * with that.
 */
static void TLan_PhyDetect( struct net_device *dev )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16		control;
	u16		hi;
	u16		lo;
	u32		phy;

	if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
		priv->phyNum = 0xFFFF;	/* sentinel: no managed PHY */
		return;
	}

	TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );

	if ( hi != 0xFFFF ) {
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	} else {
		priv->phy[0] = TLAN_PHY_NONE;
	}

	priv->phy[1] = TLAN_PHY_NONE;
	for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
		TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
		TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
		TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
		/* all-ones on every register means nothing is at this address */
		if ( ( control != 0xFFFF ) ||
		     ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
			TLAN_DBG( TLAN_DEBUG_GNRL,
				  "PHY found at %02x %04x %04x %04x\n",
				  phy, control, hi, lo );
			if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
			     ( phy != TLAN_PHY_MAX_ADDR ) ) {
				priv->phy[1] = phy;
			}
		}
	}

	if ( priv->phy[1] != TLAN_PHY_NONE ) {
		priv->phyNum = 1;
	} else if ( priv->phy[0] != TLAN_PHY_NONE ) {
		priv->phyNum = 0;
	} else {
		printk( "TLAN:  Cannot initialize device, no PHY was found!\n" );
	}

}
2552
2553
2554
2555
/* Power down the selected PHY (and the other detected PHY too, when the
 * internal one is selected and the adapter is not configured to use the
 * internal 10 Mb interface).  Then schedules power-up via the driver timer
 * after HZ/20 to give the PHY time to settle.
 */
static void TLan_PhyPowerDown( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u16		value;

	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	TLan_MiiSync( dev->base_addr );
	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
	if ( ( priv->phyNum == 0 ) &&
	     ( priv->phy[1] != TLAN_PHY_NONE ) &&
	     ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
		TLan_MiiSync( dev->base_addr );
		TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
	}

	/* Wait for 50 ms and powerup. This is arbitrary.  It is intended to
	 * give the PHY time to settle before the next stage runs. */

	TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );

}
2579
2580
2581
2582
/* Power the selected PHY back up (clearing PDOWN/ISOLATE, keeping loopback
 * asserted), then schedule the reset stage via the driver timer after
 * HZ/20 so the PHY can stabilize first.
 */
static void TLan_PhyPowerUp( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u16		value;

	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
	TLan_MiiSync( dev->base_addr );
	value = MII_GC_LOOPBK;
	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
	TLan_MiiSync(dev->base_addr);

	/* Wait for 500 ms and reset the
	 * PHY.  The settle time is presumably PHY-dependent -- the delay
	 * used here is HZ/20 (50 ms); TODO confirm intended duration. */

	TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );

}
2600
2601
2602
2603
2604static void TLan_PhyReset( struct net_device *dev )
2605{
2606 TLanPrivateInfo *priv = netdev_priv(dev);
2607 u16 phy;
2608 u16 value;
2609
2610 phy = priv->phy[priv->phyNum];
2611
2612 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
2613 TLan_MiiSync( dev->base_addr );
2614 value = MII_GC_LOOPBK | MII_GC_RESET;
2615 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
2616 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
2617 while ( value & MII_GC_RESET ) {
2618 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
2619 }
2620
2621
2622
2623
2624
2625 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
2626
2627}
2628
2629
2630
2631
/* Begin link establishment on the selected PHY.  With autonegotiation
 * capability (and no forced AUI):
 *  - a forced speed/duplex module option writes the matching MII control
 *    value directly;
 *  - otherwise the PHY's capability bits are advertised and (re)negotiation
 *    is started, with completion checked in 2 s via TLAN_TIMER_PHY_FINISH_AN.
 * Without autonegotiation, falls back to the internal PHY for AUI or
 * programs speed/duplex/AUI-select manually, then finishes the reset after
 * a fixed 4 s delay.
 */
static void TLan_PhyStartLink( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u16		ability;
	u16		control;
	u16		data;
	u16		phy;
	u16		status;
	u16		tctl;

	phy = priv->phy[priv->phyNum];
	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
	/* NOTE(review): this second read into `ability` is dead -- the value
	 * is overwritten by `status >> 11` below before any use. */
	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );

	if ( ( status & MII_GS_AUTONEG ) &&
	     ( ! priv->aui ) ) {
		/* capability bits of GEN_STS start at bit 11 */
		ability = status >> 11;
		if ( priv->speed  == TLAN_SPEED_10 &&
		     priv->duplex == TLAN_DUPLEX_HALF) {
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
		} else if ( priv->speed == TLAN_SPEED_10 &&
			    priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlanFullDuplex = TRUE;
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
		} else if ( priv->speed == TLAN_SPEED_100 &&
			    priv->duplex == TLAN_DUPLEX_HALF) {
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
		} else if ( priv->speed == TLAN_SPEED_100 &&
			    priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlanFullDuplex = TRUE;
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
		} else {

			/* Advertise our capabilities (plus the selector field). */
			TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
			/* Enablee Auto-Neg */
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
			/* Restart Auto-Neg */
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );

			/* Let the phy setup, then check the results of the
			 * negotiation in 2 s. */

			printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
			TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
			return;
		}

	}

	if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
		/* AUI requires the internal PHY: switch over and restart the
		 * power-down/up sequence on it. */
		priv->phyNum = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
		TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
		TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
		return;
	}  else if ( priv->phyNum == 0 ) {
		control = 0;
		TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
		if ( priv->aui ) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if ( priv->duplex == TLAN_DUPLEX_FULL ) {
				control |= MII_GC_DUPLEX;
				priv->tlanFullDuplex = TRUE;
			}
			if ( priv->speed == TLAN_SPEED_100 ) {
				control |= MII_GC_SPEEDSEL;
			}
		}
		TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
	}

	/* Wait for 4 s and complete the reset; the link should be up by then.
	 */

	TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );

}
2715
2716
2717
2718
/* Check the outcome of autonegotiation.  If it has not completed yet,
 * re-arm the timer for another 8 s (printing a hint the first time).  On
 * completion, derive the common mode from our advertisement AND the link
 * partner ability, set full duplex where negotiated, fall back to the
 * internal 10 Mb interface when only 10 Mb modes matched and the adapter
 * supports it, and finally finish the reset after a short delay.
 */
static void TLan_PhyFinishAutoNeg( struct net_device *dev )
{
	TLanPrivateInfo	*priv = netdev_priv(dev);
	u16		an_adv;
	u16		an_lpa;
	u16		data;
	u16		mode;
	u16		phy;
	u16		status;

	phy = priv->phy[priv->phyNum];

	/* read twice: the autoneg-complete bit is latched */
	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
	udelay( 1000 );
	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );

	if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
		/* Wait for 8 sec to give the process
		 * more time.  Perhaps we should fail after a while.
		 */
		if (!priv->neg_be_verbose++) {
			pr_info("TLAN:  Giving autonegotiation more time.\n");
			pr_info("TLAN:  Please check that your adapter has\n");
			pr_info("TLAN:  been properly connected to a HUB or Switch.\n");
			pr_info("TLAN:  Trying to establish link in the background...\n");
		}
		TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
		return;
	}

	printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
	TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
	TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
	/* bits 5-9 of ADV & LPA: common technology abilities */
	mode = an_adv & an_lpa & 0x03E0;
	if ( mode & 0x0100 ) {
		/* 100BASE-TX full duplex */
		priv->tlanFullDuplex = TRUE;
	} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
		/* no 100 half, but 10 full */
		priv->tlanFullDuplex = TRUE;
	}

	/* no 100 Mb mode matched at all: switch to the internal 10 Mb PHY */
	if ( ( ! ( mode & 0x0180 ) ) &&
	     ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
	     ( priv->phyNum != 0 ) ) {
		priv->phyNum = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
		TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
		TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
		return;
	}

	if ( priv->phyNum == 0 ) {
		if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
		     ( an_adv & an_lpa & 0x0040 ) ) {
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
					  MII_GC_AUTOENB | MII_GC_DUPLEX );
			pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
		} else {
			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
			pr_info( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
		}
	}

	/* Let the MII decide on its own and finish the reset shortly. */

	TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );

}
2786
2787#ifdef MONITOR
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
/* Periodic link-beat monitor (compiled only with MONITOR defined): polls
 * MII_GEN_STS every 2 s via the driver timer, updating priv->link and the
 * carrier state when the link goes down or comes back up.
 */
void TLan_PhyMonitor( struct net_device *dev )
{
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16     phy;
	u16     phy_status;

	phy = priv->phy[priv->phyNum];

        /* Get PHY status register */
        TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );

        /* Check if link has been lost */
        if (!(phy_status & MII_GS_LINK)) {
 	       if (priv->link) {
		      priv->link = 0;
	              printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
	              netif_carrier_off(dev);
		      TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
		      return;
		}
	}

        /* Link restablished? */
        if ((phy_status & MII_GS_LINK) && !priv->link) {
 		priv->link = 1;
        	printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
		netif_carrier_on(dev);
        }

	/* Setup a new monitor */
	TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
}
2839
2840#endif
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
/* Bit-bang an MII management read frame over the NetSio register.
 * Sends start (01) + read opcode (10) + 5-bit PHY address + 5-bit register,
 * then samples the PHY's acknowledge and 16 data bits on MCLK edges.
 * On NACK (no PHY at that address), 16 clocks are still issued and the
 * result is 0xFFFF.
 *
 * dev, phy, reg - device, PHY address, register number
 * val           - out: register value (0xFFFF on failure)
 * Returns FALSE on success, TRUE if the PHY did not acknowledge.
 * Takes priv->lock unless called from IRQ context (where the caller is
 * assumed to already exclude concurrent MII access).  MII interrupts
 * (MINTEN) are suspended for the duration and restored afterwards.
 */
static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
{
	u8	nack;
	u16	sio, tmp;
 	u32	i;
	int	err;
	int	minten;
	TLanPrivateInfo *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = FALSE;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	TLan_MiiSync(dev->base_addr);

	/* suspend MII interrupts while we drive the bus */
	minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
	if ( minten )
		TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);

	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* Start ( 01b ) */
	TLan_MiiSendData( dev->base_addr, 0x2, 2 );	/* Read  ( 10b ) */
	TLan_MiiSendData( dev->base_addr, phy, 5 );	/* Device #      */
	TLan_MiiSendData( dev->base_addr, reg, 5 );	/* Register #    */

	/* turnaround: release the data line so the PHY can drive it */
	TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio);

	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);

	nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio);	/* ack = data low */
	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
	if (nack) {
		/* no ack: clock out the 16 data slots anyway, return 0xffff */
		for (i = 0; i < 16; i++) {
			TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
			TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = TRUE;
	} else {
		/* sample 16 data bits, MSB first */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
			if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	/* idle cycle */
	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);

	if ( minten )
		TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
2970{
2971 u16 sio;
2972 u32 i;
2973
2974 if ( num_bits == 0 )
2975 return;
2976
2977 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
2978 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
2979 TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
2980
2981 for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
2982 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
2983 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
2984 if ( data & i )
2985 TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
2986 else
2987 TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
2988 TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
2989 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
2990 }
2991
2992}
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011static void TLan_MiiSync( u16 base_port )
3012{
3013 int i;
3014 u16 sio;
3015
3016 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
3017 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
3018
3019 TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
3020 for ( i = 0; i < 32; i++ ) {
3021 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
3022 TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
3023 }
3024
3025}
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
/*
 * TLan_MiiWriteReg
 *
 * Writes `val` to MII register `reg` of the PHY at address `phy`,
 * bit-banging an MDIO write frame over the NetSio register:
 * sync/preamble, start (01), write opcode (01), 5-bit PHY address,
 * 5-bit register address, turnaround (10), then 16 data bits.
 *
 * MINTEN (MII interrupt enable) is disabled for the duration of the
 * frame and restored afterwards if it was set, so the bit-banged
 * traffic does not raise spurious interrupts.
 *
 * priv->lock is taken unless we are in hard-interrupt context —
 * presumably the interrupt path already holds it; TODO confirm
 * against the callers.
 */
static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	TLanPrivateInfo *priv = netdev_priv(dev);

	/* Select the NetSio DIO register and compute its port address. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	/* Preamble: put the PHY management interface in a known state. */
	TLan_MiiSync( dev->base_addr );

	/* Mask MII interrupts while we drive the bus by hand. */
	minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
	if ( minten )
		TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );

	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* start bits (01) */
	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* write opcode (01) */
	TLan_MiiSendData( dev->base_addr, phy, 5 );	/* PHY address */
	TLan_MiiSendData( dev->base_addr, reg, 5 );	/* register address */

	TLan_MiiSendData( dev->base_addr, 0x2, 2 );	/* turnaround (10) */
	TLan_MiiSendData( dev->base_addr, val, 16 );	/* register value */

	/* One idle clock cycle to finish the frame. */
	TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
	TLan_SetBit( TLAN_NET_SIO_MCLK, sio );

	/* Restore MII interrupt enable if it was on when we started. */
	if ( minten )
		TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
/*
 * TLan_EeSendStart
 *
 * Issues a start condition on the adapter's serial EEPROM interface
 * (I2C-style: a high-to-low transition on EDATA while ECLOK is high),
 * then leaves the clock low with ETXEN asserted so the host drives
 * the data line for the bytes that follow.  The exact transition
 * order below is the bus protocol — do not reorder.
 */
static void TLan_EeSendStart( u16 io_base )
{
	u16 sio;

	/* Select the NetSio DIO register and compute its port address. */
	outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );		/* clock high */
	TLan_SetBit( TLAN_NET_SIO_EDATA, sio );		/* data high */
	TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );		/* host drives data line */
	TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );	/* data falls while clock high = START */
	TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );	/* clock low, ready for first bit */

}
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
3161{
3162 int err;
3163 u8 place;
3164 u16 sio;
3165
3166 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
3167 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3168
3169
3170 for ( place = 0x80; place != 0; place >>= 1 ) {
3171 if ( place & data )
3172 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3173 else
3174 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3175 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3176 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3177 }
3178 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
3179 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3180 err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
3181 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3182 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
3183
3184 if ( ( ! err ) && stop ) {
3185
3186 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3187 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3188 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3189 }
3190
3191 return ( err );
3192
3193}
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
3223{
3224 u8 place;
3225 u16 sio;
3226
3227 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
3228 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3229 *data = 0;
3230
3231
3232 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
3233 for ( place = 0x80; place; place >>= 1 ) {
3234 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3235 if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
3236 *data |= place;
3237 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3238 }
3239
3240 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
3241 if ( ! stop ) {
3242 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3243 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3244 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3245 } else {
3246 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3247 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3248 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3249
3250 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3251 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3252 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3253 }
3254
3255}
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
3282{
3283 int err;
3284 TLanPrivateInfo *priv = netdev_priv(dev);
3285 unsigned long flags = 0;
3286 int ret=0;
3287
3288 spin_lock_irqsave(&priv->lock, flags);
3289
3290 TLan_EeSendStart( dev->base_addr );
3291 err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
3292 if (err)
3293 {
3294 ret=1;
3295 goto fail;
3296 }
3297 err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
3298 if (err)
3299 {
3300 ret=2;
3301 goto fail;
3302 }
3303 TLan_EeSendStart( dev->base_addr );
3304 err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
3305 if (err)
3306 {
3307 ret=3;
3308 goto fail;
3309 }
3310 TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
3311fail:
3312 spin_unlock_irqrestore(&priv->lock, flags);
3313
3314 return ret;
3315
3316}
3317
3318
3319
3320