#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"

static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

#undef MONITOR

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static const struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(unsigned long);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
static void tlan_phy_monitor(struct net_device *);
#endif

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);

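
/* Each buffer's struct sk_buff pointer is stashed in the unused tail of
 * its tlan_list entry, split across two 32-bit address fields so that a
 * 64-bit pointer survives on both 32- and 64-bit hosts.
 */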
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

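/* Interrupt dispatch table: the interrupt type field from TLAN_HOST_INT
 * indexes this table; each handler returns the ack value to fold into the
 * TLAN_HC_ACK host command (0 means do not acknowledge).
 */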
static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};

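/* Arm the private one-shot timer for the given event type.  An already
 * pending timer is left alone unless it is only the activity-LED timer,
 * which may be retargeted.  The lock is taken only outside hard IRQ
 * context; the interrupt handlers already hold it.
 */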
static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer.data = (unsigned long) dev;
	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}
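
/* Undo tlan_probe1() for a PCI adapter: unregister the net device and
 * release its DMA block, IO regions, and pending timeout work.
 */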
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	if (priv->dma_storage) {
		pci_free_consistent(priv->pci_dev,
				    priv->dma_size, priv->dma_storage,
				    priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	pci_set_drvdata(pdev, NULL);
	/* priv lives inside dev, so flush the work before freeing it */
	cancel_work_sync(&priv->tlan_tqueue);
	free_netdev(dev);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

#ifdef CONFIG_PM

static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int tlan_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, 0, 0);
	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

#else /* CONFIG_PM */

#define tlan_suspend NULL
#define tlan_resume NULL

#endif /* CONFIG_PM */


static struct pci_driver tlan_driver = {
	.name		= "tlan",
	.id_table	= tlan_pci_tbl,
	.probe		= tlan_init_one,
	.remove		= tlan_remove_one,
	.suspend	= tlan_suspend,
	.resume		= tlan_resume,
};

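/* Module entry point: register the PCI driver, then scan the EISA bus by
 * hand, and fail with -ENODEV if neither probe found an adapter.
 */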
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);
	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}


static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}
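
/* Probe one adapter and register it.  Called with a pci_dev for PCI
 * adapters, or with pdev == NULL and an explicit ioaddr/irq pair for EISA
 * adapters found by tlan_eisa_probe().
 */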
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {

		device_id = inw(ioaddr + EISA_ID2);
		priv->is_eisa = 1;
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
			    priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
#endif
err_out:
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}

static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}


static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}

module_init(tlan_probe);
module_exit(tlan_exit);

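/* Walk the EISA slot IO windows (0x1000 steps) looking for the ThunderLAN
 * EISA signature, check that the board is enabled, decode its IRQ from the
 * configuration register, and hand any match to tlan_probe1().
 */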
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int rc = -ENODEV;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			goto out;
		}

		rc = tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open		= tlan_open,
	.ndo_stop		= tlan_close,
	.ndo_start_xmit		= tlan_start_tx,
	.ndo_tx_timeout		= tlan_tx_timeout,
	.ndo_get_stats		= tlan_get_stats,
	.ndo_set_rx_mode	= tlan_set_multicast_list,
	.ndo_do_ioctl		= tlan_ioctl,
	.ndo_change_mtu_rh74	= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tlan_poll,
#endif
};
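
/* Per-device init: allocate one coherent DMA block for the RX and TX list
 * rings, read the MAC address out of the EEPROM, and hook up the netdev
 * operations.
 */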
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	memset(priv->dma_storage, 0, dma_size);
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < 6 ; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	dev->addr_len = 6;

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}
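
/* ndo_open: grab the (shared) IRQ, then bring the adapter up via
 * tlan_start().
 */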
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	init_timer(&priv->timer);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}

static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void tlan_tx_timeout(struct net_device *dev)
{
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}


static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev);
}
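
/* ndo_start_xmit: pad the frame to the hardware minimum, map it, fill the
 * next free TX list entry, and either start the TX channel or chain the
 * entry onto the one already in flight.
 */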
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}
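
/* IRQ handler: read the interrupt type from TLAN_HOST_INT, dispatch
 * through tlan_int_vector[], and acknowledge with whatever count the
 * handler returned.
 */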
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}

static int tlan_close(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	priv->neg_be_verbose = 0;
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}


static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}

static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}
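
/* TX end-of-frame: unmap and free every completed TX list entry, restart
 * the channel if it hit end-of-channel with more frames queued, and pulse
 * the activity LED on adapters that have one.
 */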
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}
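
/* RX end-of-frame: pass each completed receive buffer up the stack,
 * replacing it with a freshly allocated skb (the frame is dropped and the
 * old buffer reused if that allocation fails), then recycle the list entry
 * onto the tail of the RX ring.
 */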
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		pci_unmap_single(priv->pci_dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			pci_map_single(priv->pci_dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}


static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	host_int = 0;
	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}

static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;
}

static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}
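
/* Private timer handler: multiplexes the PHY power/reset/autonegotiation
 * state machine, the delayed finish-reset path, and the activity-LED
 * blink-off, keyed by priv->timer_type.
 */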
static void tlan_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct tlan_priv *priv = netdev_priv(dev);
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
#ifdef MONITOR
	case TLAN_TIMER_LINK_BEAT:
		tlan_phy_monitor(dev);
		break;
#endif
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.function = tlan_timer;
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}
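
/* Rebuild both rings: mark all TX entries unused, and give every RX entry
 * a freshly mapped skb, forward-linked into a chain terminated at the
 * tail.
 */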
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb)
			break;

		list->buffer[0].address = pci_map_single(priv->pci_dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}


static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(
				priv->pci_dev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(priv->pci_dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}

static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off.  +0        +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
	}
}


static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info("   Forward    = 0x%08x\n", list->forward);
	pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
	pr_info("   Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}
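
/* Pull the accumulated statistics registers out of DIO space (reading
 * clears them) and, when asked to record, fold them into dev->stats.
 */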
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}
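
/* Full adapter reset: assert the reset bit, mask interrupts, clear the
 * address and hash registers, reload the burst/threshold defaults, release
 * the MII from reset, and redetect the PHY before kicking off the PHY
 * bring-up sequence.
 */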
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	if (priv->phy_num == 0)
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}


static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if ((status & MII_GS_LINK) &&
		    (tlphy_id1 == NAT_SEM_ID1) &&
		    (tlphy_id2 == NAT_SEM_ID2)) {
			tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);

			netdev_info(dev,
				    "Link active with %s %uMbps %s-Duplex\n",
				    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
				    ? "forced" : "Autonegotiation enabled,",
				    tlphy_par & TLAN_PHY_SPEED_100
				    ? 100 : 10,
				    tlphy_par & TLAN_PHY_DUPLEX_FULL
				    ? "Full" : "Half");

			if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
				netdev_info(dev, "Partner capability:");
				for (i = 5; i < 10; i++)
					if (partner & (1 << i))
						pr_cont(" %s", media[i-5]);
				pr_cont("\n");
			}

			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
#ifdef MONITOR
			priv->link = 1;
			tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
		} else if (status & MII_GS_LINK) {
			netdev_info(dev, "Link active\n");
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}

static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}


static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info("   Off.  +0     +1     +2     +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}

static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}


static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) &&
	    (priv->phy[1] != TLAN_PHY_NONE) &&
	    (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
}

static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);

	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
}


static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 value;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
	while (value & MII_GC_RESET)
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);

	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
}
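
/* Kick off link bring-up on the current PHY: force speed/duplex when the
 * module parameters ask for it, otherwise advertise our abilities and
 * restart autonegotiation, falling back to the internal PHY for AUI.
 */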
2565static void tlan_phy_start_link(struct net_device *dev)
2566{
2567 struct tlan_priv *priv = netdev_priv(dev);
2568 u16 ability;
2569 u16 control;
2570 u16 data;
2571 u16 phy;
2572 u16 status;
2573 u16 tctl;
2574
2575 phy = priv->phy[priv->phy_num];
2576 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
2577 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2578 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
2579
2580 if ((status & MII_GS_AUTONEG) &&
2581 (!priv->aui)) {
2582 ability = status >> 11;
2583 if (priv->speed == TLAN_SPEED_10 &&
2584 priv->duplex == TLAN_DUPLEX_HALF) {
2585 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
2586 } else if (priv->speed == TLAN_SPEED_10 &&
2587 priv->duplex == TLAN_DUPLEX_FULL) {
2588 priv->tlan_full_duplex = true;
2589 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
2590 } else if (priv->speed == TLAN_SPEED_100 &&
2591 priv->duplex == TLAN_DUPLEX_HALF) {
2592 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
2593 } else if (priv->speed == TLAN_SPEED_100 &&
2594 priv->duplex == TLAN_DUPLEX_FULL) {
2595 priv->tlan_full_duplex = true;
2596 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
2597 } else {
2598
2599
2600 tlan_mii_write_reg(dev, phy, MII_AN_ADV,
2601 (ability << 5) | 1);
2602
2603 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
2604
2605 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
2606
2607
2608
2609
2610
2611 netdev_info(dev, "Starting autonegotiation\n");
2612 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
2613 return;
2614 }
2615
2616 }
2617
2618 if ((priv->aui) && (priv->phy_num != 0)) {
2619 priv->phy_num = 0;
2620 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2621 | TLAN_NET_CFG_PHY_EN;
2622 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2623 tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2624 return;
2625 } else if (priv->phy_num == 0) {
2626 control = 0;
2627 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
2628 if (priv->aui) {
2629 tctl |= TLAN_TC_AUISEL;
2630 } else {
2631 tctl &= ~TLAN_TC_AUISEL;
2632 if (priv->duplex == TLAN_DUPLEX_FULL) {
2633 control |= MII_GC_DUPLEX;
2634 priv->tlan_full_duplex = true;
2635 }
2636 if (priv->speed == TLAN_SPEED_100)
2637 control |= MII_GC_SPEEDSEL;
2638 }
2639 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
2640 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
2641 }
2642
2643
2644
2645
2646 tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
2647
2648}
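
/* The bare constants written to MII_GEN_CTL above are the standard
 * MII BMCR bits, which <linux/mii.h> (already included) names.  A
 * minimal sketch, not part of the driver, of how the same values
 * could be built from named bits (the helper itself is hypothetical):
 */
#if 0
static u16 tlan_example_forced_bmcr(unsigned int speed,
				    unsigned int duplex)
{
	u16 bmcr = 0;			/* 0x0000: 10 Mbps, half duplex */

	if (speed == TLAN_SPEED_100)
		bmcr |= BMCR_SPEED100;	/* 0x2000 */
	if (duplex == TLAN_DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;	/* 0x0100 */
	return bmcr;			/* 0x2100: 100 Mbps, full duplex */
}
/* Likewise, 0x1000 is BMCR_ANENABLE, and 0x1200 is
 * BMCR_ANENABLE | BMCR_ANRESTART.
 */
#endif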



static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 data;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* The autoneg-complete bit is latched; read, wait, and read
	 * again to get the current state.
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* Wait ~8 s and retry; complain only on the first
		 * attempt.
		 */
		if (!priv->neg_be_verbose++) {
			pr_info("Giving autonegotiation more time.\n");
			pr_info("Please check that your adapter has been properly connected to a hub or switch.\n");
			pr_info("Trying to establish link in the background...\n");
		}
		tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	/* 0x03e0 masks the technology bits common to both partners:
	 * 0x0020 10HD, 0x0040 10FD, 0x0080 100HD, 0x0100 100FD,
	 * 0x0200 100baseT4.
	 */
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;	/* 100 Mbps full duplex */
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;	/* 10 Mbps full duplex */

	/* No common 100 Mbps mode: fall back to the internal 10baseT
	 * PHY if the adapter has one and we are not already using it.
	 */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* Let the link settle for ~100 ms, then finish the reset. */
	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);

}
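
/* The mode bits tested above are the standard autonegotiation
 * technology-ability bits, also named in <linux/mii.h>.  A minimal
 * sketch (hypothetical helper, not part of the driver) of the same
 * duplex decision written with named bits:
 */
#if 0
static bool tlan_example_negotiated_full_duplex(u16 mode)
{
	if (mode & ADVERTISE_100FULL)		/* 0x0100 */
		return true;
	/* no 100baseTx-HD in common, but 10baseT-FD is available */
	return !(mode & ADVERTISE_100HALF) &&	/* 0x0080 */
	       (mode & ADVERTISE_10FULL);	/* 0x0040 */
}
#endif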

#ifdef MONITOR

/*********************************************************************
 *
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of this device.
 *
 *	This function monitors PHY condition by reading the status
 *	register via the MII bus, reports link changes (up/down) to
 *	the stack via netif_carrier_{on,off}, and rearms itself every
 *	two seconds.
 *
 ********************************************************************/

void tlan_phy_monitor(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* Get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* Check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (priv->link) {
			priv->link = 0;
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			netif_carrier_off(dev);
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
			return;
		}
	}

	/* Check if link has been restored */
	if ((phy_status & MII_GS_LINK) && !priv->link) {
		priv->link = 1;
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}

	/* Set up a new monitor cycle */
	tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}

#endif /* MONITOR */


/*****************************************************************************
 *
 *	ThunderLAN driver MII routines
 *
 *	Bit-banged access to the PHY management interface through the
 *	TLAN NetSio register.
 *
 *****************************************************************************/


/***************************************************************
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		false	if ack received ok
 *		true	if no ack received or other error
 *	Parms:
 *		dev	The device structure containing the IO base
 *			address for this device.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be
 *			retrieved.
 *		val	A pointer to a variable to store the
 *			retrieved value.
 *
 *	This function uses the TLAN's MII bus to retrieve the contents
 *	of a given register on a PHY.  It takes the adapter lock
 *	unless called from interrupt context.
 *
 **************************************************************/

static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* Mask the MII interrupt while bit-banging, if it was enabled. */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read  (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #    */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #  */

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
	if (nack) {					/* no ACK, so fake it */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {					/* ACK, so read data */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}
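
/* Usage sketch (hypothetical helper, not part of the driver): the
 * link bit in the generic status register is latched, which is why
 * callers in this file read the register twice before trusting it.
 */
#if 0
static bool tlan_example_link_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 status;

	tlan_mii_read_reg(dev, priv->phy[priv->phy_num],
			  MII_GEN_STS, &status);	/* latched value */
	tlan_mii_read_reg(dev, priv->phy[priv->phy_num],
			  MII_GEN_STS, &status);	/* current value */
	return (status & MII_GS_LINK) != 0;
}
#endif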


/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *		data		The data to be sent over the MII.
 *		num_bits	The number of bits to send (at most 32).
 *
 *	This function clocks out up to 32 bits, MSB first, on the MII
 *	management bus, with the transmit-enable bit asserted.
 *
 **************************************************************/

static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}
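
/* For reference, the complete management frame assembled by the
 * routines in this file, per IEEE 802.3 clause 22 (a summary, not a
 * quote from the chip documentation):
 *
 *	<32 sync clocks> <ST=01> <OP: 10=read, 01=write>
 *	<PHYAD, 5 bits> <REGAD, 5 bits> <turnaround> <DATA, 16 bits>
 *
 * tlan_mii_sync() supplies the leading clocks, tlan_mii_send_data()
 * the addressing fields, and the read/write routines handle the
 * turnaround and data phases themselves.
 */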


/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *
 *	This function clocks 32 cycles on the MII management bus with
 *	the transmitter disabled, resynchronizing any PHY that is in
 *	an unknown state.
 *
 **************************************************************/

static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}


/***************************************************************
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure containing the IO base
 *			address for this device.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be
 *			written.
 *		val	The value to be written to the register.
 *
 *	This function uses the TLAN's MII bus to write the contents of
 *	a given register on a PHY.  It takes the adapter lock unless
 *	called from interrupt context.
 *
 **************************************************************/

static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* Mask the MII interrupt while bit-banging, if it was enabled. */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #    */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #  */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* turnaround (10b) */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}


/*****************************************************************************
 *
 *	ThunderLAN driver EEPROM routines
 *
 *	The EEPROM is a serial device accessed through an I2C-style
 *	two-wire interface on the TLAN NetSio register.
 *
 *****************************************************************************/


/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM to use.
 *
 *	This function sends a start cycle to the EEPROM: the data
 *	line is pulled low while the clock is high.
 *
 **************************************************************/

static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);	/* data high -> low */
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}


/***************************************************************
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		0 if the correct ack was received, 1 otherwise.
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM to use.
 *		data		The 8 bits of information to send
 *				to the EEPROM.
 *		stop		If TLAN_EEPROM_STOP is passed, a
 *				stop cycle is sent after the ack
 *				is read.
 *
 *	This function sends a byte on the serial EEPROM line, driving
 *	the clock to send each bit.  It then reverses transmission
 *	direction and reads an acknowledge bit.
 *
 **************************************************************/

static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Assume clock is low, tx enabled; send the byte MSB first. */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	/* Release the data line and clock in the EEPROM's ack bit. */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* Stop cycle: data goes low -> high while clock is high. */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}


/***************************************************************
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM to use.
 *		data		An address to a char to hold the
 *				byte sent from the EEPROM.
 *		stop		If TLAN_EEPROM_STOP is passed, no
 *				ack is sent and a stop cycle is
 *				sent after the byte is received.
 *
 *	This function receives 8 bits of data from the EEPROM over
 *	the serial link.  It then sends an ack bit if more bytes are
 *	expected, or a no-ack followed by a stop cycle otherwise.
 *
 **************************************************************/

static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* Release the data line and clock in the byte, MSB first. */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		/* Ack = data low on the ninth clock; read another byte. */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		/* No ack = data high on the ninth clock (stop read)... */
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

		/* ...followed by a stop cycle: data goes low -> high
		 * while the clock is high.
		 */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}


/***************************************************************
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		0 on success; otherwise the number of the stage at
 *		which the error occurred.
 *	Parms:
 *		dev		The device structure containing the
 *				IO base address for this device.
 *		ee_addr		The address of the byte in the EEPROM
 *				whose contents are to be retrieved.
 *		data		An address to a char to hold the
 *				byte obtained from the EEPROM.
 *
 *	This function reads a byte of information from a byte cell
 *	in the EEPROM.
 *
 **************************************************************/

static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	/* 0xa0 selects the EEPROM for writing the cell address... */
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	tlan_ee_send_start(dev->base_addr);
	/* ...and after a repeated start, 0xa1 selects it for reading. */
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}
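
/* Usage sketch (hypothetical helper, not part of the driver): the
 * station address is stored in the EEPROM at the per-board offset
 * (board_info[].addr_ofs), so fetching it is six sequential byte
 * reads; a nonzero return from tlan_ee_read_byte() means one of the
 * send stages was not acked.
 */
#if 0
static int tlan_example_read_mac(struct net_device *dev, u8 addr_ofs,
				 u8 mac[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if (tlan_ee_read_byte(dev, addr_ofs + i, &mac[i]))
			return -EIO;
	return 0;
}
#endif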

