#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"


static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");


#undef MONITOR


static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(unsigned long);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
static void tlan_phy_monitor(struct net_device *);
#endif

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);

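/*
 * The chip only looks at the buffer DMA addresses in a list; the
 * driver stashes its own sk_buff pointer in the otherwise unused
 * buffer slots 8 and 9, split into two 32-bit halves so the pointer
 * survives the round trip on 64-bit hosts.
 */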
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};

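/*
 * tlan_set_timer
 *
 * Arms the driver's single deferred-work timer with the given type,
 * unless a timer other than the activity one is already pending.
 * Interrupt-context callers already hold priv->lock, hence the
 * in_irq() checks around the locking.
 */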
static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer.data = (unsigned long) dev;
	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}

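/*
 * tlan_remove_one
 *
 * PCI removal callback: unregisters the net device, frees the shared
 * list area, releases the I/O regions and the net_device itself.
 */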
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	if (priv->dma_storage) {
		pci_free_consistent(priv->pci_dev,
				    priv->dma_size, priv->dma_storage,
				    priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	free_netdev(dev);

	pci_set_drvdata(pdev, NULL);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

#ifdef CONFIG_PM

static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int tlan_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, 0, 0);
	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

#else

#define tlan_suspend	NULL
#define tlan_resume	NULL

#endif


static struct pci_driver tlan_driver = {
	.name = "tlan",
	.id_table = tlan_pci_tbl,
	.probe = tlan_init_one,
	.remove = tlan_remove_one,
	.suspend = tlan_suspend,
	.resume = tlan_resume,
};

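/*
 * tlan_probe
 *
 * Module entry point.  Registers the PCI driver, then scans for EISA
 * adapters; fails with -ENODEV if neither probe installed a device.
 */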
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);
	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}

static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}

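/*
 * tlan_probe1
 *
 * Common bring-up for a single adapter.  For PCI boards (pdev != NULL)
 * the I/O base, IRQ and revision come from config space; for EISA
 * boards the caller supplies them and pdev is NULL, so the PCI-only
 * steps below are guarded.
 */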
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{

	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	/* pdev is NULL for EISA adapters, so only set a parent for PCI */
	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {
		/* EISA card */
		device_id = inw(ioaddr + EISA_ID2);
		priv->is_eisa = 1;
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13]; /* NetFlex-3/E */
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	/* pdev is NULL if this is an EISA device */
	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
			    priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
#endif
err_out:
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}

static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}

static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}


module_init(tlan_probe);
module_exit(tlan_exit);

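/*
 * tlan_eisa_probe
 *
 * Walks EISA slot I/O space (0x1000-0x8000 in 0x1000 steps), matches
 * the TLAN ID registers, decodes the IRQ from the slot's configuration
 * register and hands any enabled board on to tlan_probe1().
 */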
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int rc = -ENODEV;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	/* Loop through all slots of the EISA bus */
	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* check if adapter is enabled */
		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		/* get the IRQ from the board */
		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* set up the newly found EISA adapter */
		rc = tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;

	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open = tlan_open,
	.ndo_stop = tlan_close,
	.ndo_start_xmit = tlan_start_tx,
	.ndo_tx_timeout = tlan_tx_timeout,
	.ndo_get_stats = tlan_get_stats,
	.ndo_set_rx_mode = tlan_set_multicast_list,
	.ndo_do_ioctl = tlan_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tlan_poll,
#endif
};

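/*
 * tlan_init
 *
 * Allocates the shared RX/TX list area (8-byte aligned), reads the
 * station address from the EEPROM at the adapter-specific offset and
 * hooks up the netdev operations and watchdog timeout.
 */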
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	memset(priv->dma_storage, 0, dma_size);
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < 6 ; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	dev->addr_len = 6;

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}

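/*
 * tlan_open
 *
 * Device open: latches the chip revision, grabs the (shared) IRQ,
 * initializes the driver timer and starts the adapter.
 */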
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	init_timer(&priv->timer);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}

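/*
 * tlan_ioctl
 *
 * MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) against the
 * currently selected PHY; refused with -EAGAIN until the PHY is up.
 */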
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:	/* get address of MII PHY in use */
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:	/* read MII PHY register */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:	/* write MII PHY register */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

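/*
 * tlan_tx_timeout
 *
 * Watchdog handler: tears down and rebuilds both list rings, resets
 * the adapter and reopens the queue.
 */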
static void tlan_tx_timeout(struct net_device *dev)
{

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev);
}

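/*
 * tlan_start_tx
 *
 * Queues one frame on the TX list ring.  If the channel is idle the
 * list is handed straight to the chip with a GO command; otherwise it
 * is chained onto the previous list's forward pointer so the chip
 * picks it up at the next end-of-channel.
 */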
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}

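/*
 * tlan_handle_interrupt
 *
 * Dispatches on the interrupt type latched in TLAN_HOST_INT through
 * the tlan_int_vector table, then acknowledges the interrupt with
 * whatever ack count/bits the handler returned.
 */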
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}

static int tlan_close(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	priv->neg_be_verbose = 0;
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}

static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}

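/*
 * tlan_set_multicast_list
 *
 * Promiscuous mode simply sets the CAF bit.  Otherwise the first
 * three multicast addresses go into address registers 1-3 and any
 * remaining ones are folded into the two 32-bit hash registers.
 */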
static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}

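/*
 * tlan_handle_tx_eof
 *
 * Reclaims completed TX lists (unmap, free the skb), restarts the
 * channel on end-of-channel if more lists are ready, and blinks the
 * activity LED via the driver timer on adapters that have one.
 */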
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}

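/*
 * tlan_handle_rx_eof
 *
 * Hands each completed RX frame up the stack, replaces its buffer
 * with a freshly mapped skb (the old buffer is reused if allocation
 * fails) and recycles the list onto the tail of the RX ring.
 */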
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		pci_unmap_single(priv->pci_dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			pci_map_single(priv->pci_dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}

static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	host_int = 0;
	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}

static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;
}

static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}

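/*
 * tlan_timer
 *
 * Single dispatcher for all deferred driver work (PHY power and reset
 * sequencing, autonegotiation polling, reset completion, the activity
 * LED timeout), keyed off priv->timer_type.
 */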
static void tlan_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct tlan_priv *priv = netdev_priv(dev);
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
#ifdef MONITOR
	case TLAN_TIMER_LINK_BEAT:
		tlan_phy_monitor(dev);
		break;
#endif
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.function = tlan_timer;
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}

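/*
 * tlan_reset_lists
 *
 * Reinitializes both rings: TX lists are marked unused, and each RX
 * list gets a mapped receive skb and is chained to the next via its
 * forward pointer, with the last list terminating the chain.
 */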
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb) {
			netdev_err(dev, "Out of memory for received data\n");
			break;
		}

		list->buffer[0].address = pci_map_single(priv->pci_dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	/* in case we ran out of memory early, clear the remaining skb
	 * pointers */
	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}

static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(
				priv->pci_dev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(priv->pci_dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}

static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off. +0 +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
	}
}

static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info(" Forward = 0x%08x\n", list->forward);
	pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
	pr_info(" Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}

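/*
 * tlan_read_and_clear_stats
 *
 * Reads the chip's statistics registers, which clear on read, and
 * folds them into the netdev stats when asked to record them.
 */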
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}

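/*
 * tlan_reset_adapter
 *
 * Full adapter reset: soft-resets the chip, masks interrupts, clears
 * the address and hash registers, reloads the load timer/threshold,
 * releases the MII from reset and re-detects the PHY before handing
 * off to tlan_finish_reset() or the PHY power-down sequence.
 */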
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	/* 1. Assert reset bit. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	/* 2. Turn off interrupts. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	/* 3. Clear AREGs and HASHs. */
	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	/* 4. Set up the NetConfig register. */
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	/* 7. Set up the remaining registers. */
	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	if (priv->phy_num == 0)
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}

static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if ((status & MII_GS_LINK) &&
		    (tlphy_id1 == NAT_SEM_ID1) &&
		    (tlphy_id2 == NAT_SEM_ID2)) {
			tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);

			netdev_info(dev,
				    "Link active with %s %uMbps %s-Duplex\n",
				    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
				    ? "forced" : "Autonegotiation enabled,",
				    tlphy_par & TLAN_PHY_SPEED_100
				    ? 100 : 10,
				    tlphy_par & TLAN_PHY_DUPLEX_FULL
				    ? "Full" : "Half");

			if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
				netdev_info(dev, "Partner capability:");
				for (i = 5; i < 10; i++)
					if (partner & (1 << i))
						pr_cont(" %s", media[i-5]);
				pr_cont("\n");
			}

			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
#ifdef MONITOR
			/* we have link beat, so enable link monitoring */
			priv->link = 1;
			tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
		} else if (status & MII_GS_LINK) {
			netdev_info(dev, "Link active\n");
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}

static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}

static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info(" Off. +0 +1 +2 +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}

static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}

2487
2488
2489
2490
2491static void tlan_phy_power_down(struct net_device *dev)
2492{
2493 struct tlan_priv *priv = netdev_priv(dev);
2494 u16 value;
2495
2496 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
2497 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2498 tlan_mii_sync(dev->base_addr);
2499 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2500 if ((priv->phy_num == 0) &&
2501 (priv->phy[1] != TLAN_PHY_NONE) &&
2502 (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
2503 tlan_mii_sync(dev->base_addr);
2504 tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
2505 }
2506
2507
2508
2509
2510
2511 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
2512
2513}
2514
2515
2516
2517
2518static void tlan_phy_power_up(struct net_device *dev)
2519{
2520 struct tlan_priv *priv = netdev_priv(dev);
2521 u16 value;
2522
2523 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
2524 tlan_mii_sync(dev->base_addr);
2525 value = MII_GC_LOOPBK;
2526 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2527 tlan_mii_sync(dev->base_addr);
2528
2529
2530
2531
2532 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
2533
2534}
2535
2536
2537
2538
2539static void tlan_phy_reset(struct net_device *dev)
2540{
2541 struct tlan_priv *priv = netdev_priv(dev);
2542 u16 phy;
2543 u16 value;
2544
2545 phy = priv->phy[priv->phy_num];
2546
2547 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2548 tlan_mii_sync(dev->base_addr);
2549 value = MII_GC_LOOPBK | MII_GC_RESET;
2550 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
2551 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2552 while (value & MII_GC_RESET)
2553 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2554
2555
2556
2557
2558
2559 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
2560
2561}
2562
2563
2564
2565
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	/* Some status bits are latched, so read the register twice
	 * to get the current state.
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		/* The PHY's abilities are in the top five status bits */
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {
			/* Advertise what we can do: copy the ability
			 * bits into the advertisement register, with
			 * the IEEE 802.3 selector field (00001) in the
			 * low bits.
			 */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* Enable autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* Restart autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* The AUI port is on the first PHY; switch to it and
		 * power-cycle the PHYs.
		 */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		/* Configure the internal PHY directly: select the AUI
		 * port, or force the requested speed and duplex.
		 */
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* Wait for 4 seconds to give the link time to come up before
	 * finishing the reset.
	 */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);

}



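/***************************************************************
 *	tlan_phy_finish_auto_neg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of the device in question.
 *
 *	This routine checks whether autonegotiation has completed,
 *	rescheduling itself if it has not.  Once negotiation is
 *	complete it records the negotiated duplex mode, falls back
 *	to the internal 10 Mbit PHY if necessary, and schedules the
 *	end of the adapter reset.
 *
 **************************************************************/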
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 data;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* Some status bits are latched, so read the register twice,
	 * with a short delay in between, to get the current state.
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* Autonegotiation hasn't completed yet; warn the user
		 * once and check again in 8 seconds.
		 */
		if (!priv->neg_be_verbose++) {
			pr_info("Giving autonegotiation more time.\n");
			pr_info("Please check that your adapter has\n");
			pr_info("been properly connected to a hub or switch.\n");
			pr_info("Trying to establish link in the background...\n");
		}
		tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	mode = an_adv & an_lpa & 0x03E0;
	/* 0x0100 is 100baseTx full duplex; 0x0040 is 10baseT full
	 * duplex, which only counts if 100baseTx half duplex (0x0080)
	 * was not also negotiated.
	 */
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	/* If no 100 Mbit mode (0x0180) was negotiated and the adapter
	 * has an internal 10 Mbit PHY, switch over to it.
	 */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* Let things settle for 100 ms, then finish the reset */
	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);

}

#ifdef MONITOR

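/***************************************************************
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of the device to monitor.
 *
 *	This routine polls the PHY status register for link changes,
 *	updates the carrier state accordingly, and reschedules
 *	itself every 2 seconds.  It is only compiled in when MONITOR
 *	is defined.
 *
 **************************************************************/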
void tlan_phy_monitor(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* Get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* Check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (priv->link) {
			priv->link = 0;
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			netif_carrier_off(dev);
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
			return;
		}
	}

	/* Has the link been re-established? */
	if ((phy_status & MII_GS_LINK) && !priv->link) {
		priv->link = 1;
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}

	/* Set up a new monitor timer */
	tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}

#endif



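/***************************************************************
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		false	if ack received ok
 *		true	if no ack received or other error
 *	Parms:
 *		dev	The device structure containing the MII.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be read.
 *		val	A pointer to a variable to store the value.
 *
 *	This routine bit-bangs an MII management read frame (start,
 *	read opcode, PHY and register addresses, turnaround, 16 data
 *	bits) over the adapter's serial I/O lines.  If the PHY does
 *	not ack, *val is set to 0xffff and true is returned.  The
 *	MII interrupt is masked during the transaction, and the priv
 *	lock is taken unless the call is made from interrupt
 *	context.
 *
 **************************************************************/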
static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* Mask the MII interrupt while we use the lines */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	/* Start (01), read opcode (10), PHY address, register address */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, 0x2, 2);
	tlan_mii_send_data(dev->base_addr, phy, 5);
	tlan_mii_send_data(dev->base_addr, reg, 5);

	/* Stop driving MDATA for the turnaround */
	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);

	/* Clock the turnaround cycle */
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	/* Sample the (active-low) ack bit */
	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	if (nack) {
		/* No ack: clock out the rest of the cycle anyway and
		 * return all ones.
		 */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {
		/* Clock in the 16 data bits, MSB first */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	/* One more clock cycle to end the transaction */
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}



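/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base I/O port of the adapter.
 *		data		The data to be sent.
 *		num_bits	The number of bits to send.
 *
 *	This routine clocks the given bits out on the MII management
 *	data line, most significant bit first.
 *
 **************************************************************/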
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	/* Shift the bits out MSB first, one per clock cycle; the
	 * dummy reads of the clock bit act as short delays.
	 */
	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}



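/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base I/O port of the adapter.
 *
 *	This routine toggles the MII clock 32 times with the
 *	transmitter disabled, resynchronizing the management
 *	interface of any attached PHY.
 *
 **************************************************************/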
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Toggle the clock 32 times with the transmitter disabled */
	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}



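/***************************************************************
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure containing the MII.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be written.
 *		val	The value to be written.
 *
 *	This routine bit-bangs an MII management write frame (start,
 *	write opcode, PHY and register addresses, turnaround, 16
 *	data bits) over the adapter's serial I/O lines, masking the
 *	MII interrupt and taking the priv lock as in
 *	tlan_mii_read_reg.
 *
 **************************************************************/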
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* Mask the MII interrupt while we use the lines */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	/* Start (01), write opcode (01), PHY address, register address */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, phy, 5);
	tlan_mii_send_data(dev->base_addr, reg, 5);

	/* Turnaround (10), then the 16 data bits */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);
	tlan_mii_send_data(dev->base_addr, val, 16);

	/* One more clock cycle to end the transaction */
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}



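/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base	The IO port base address for the TLAN device.
 *
 *	This routine sends a start condition to the serial EEPROM:
 *	the data line is pulled low while the clock is high.
 *
 **************************************************************/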
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Pull the data line low while the clock is high */
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}



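/***************************************************************
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		0	if the byte was acked
 *		nonzero	if no ack was received
 *	Parms:
 *		io_base	The IO port base address for the TLAN device.
 *		data	The 8 bits of data to send to the EEPROM.
 *		stop	TLAN_EEPROM_STOP to send a stop condition
 *			after the byte is acked.
 *
 *	This routine clocks the byte out to the EEPROM MSB first,
 *	then samples the EEPROM's (active-low) ack bit.
 *
 **************************************************************/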
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Shift the byte out, MSB first */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	/* Release the data line and clock in the (active-low) ack bit */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* Send stop: raise the data line while the clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}



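/***************************************************************
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base	The IO port base address for the TLAN device.
 *		data	A pointer to storage for the byte read.
 *		stop	TLAN_EEPROM_STOP to send no-ack and a stop
 *			condition after the byte, ending the read.
 *
 *	This routine clocks a byte in from the EEPROM MSB first,
 *	then either acknowledges it or sends no-ack followed by a
 *	stop condition.
 *
 **************************************************************/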
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* Release the data line and clock the byte in, MSB first */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		/* Ack = data low on the ninth clock pulse */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		/* No ack = data high on the ninth clock pulse... */
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

		/* ...followed by a stop: the data line is raised
		 * while the clock is high.
		 */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}



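/***************************************************************
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		0	on success
 *		1, 2 or 3, identifying which step of the sequence
 *			failed to get an ack
 *	Parms:
 *		dev	The device structure of the adapter.
 *		ee_addr	The address of the byte to read.
 *		data	A pointer to storage for the byte read.
 *
 *	This routine performs a random read of one byte from the
 *	serial EEPROM: start, device select for write (0xa0), byte
 *	address, repeated start, device select for read (0xa1), then
 *	the data byte followed by a stop.  The priv lock is held
 *	throughout.
 *
 **************************************************************/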
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	/* Device select, write mode (0xa0) */
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	/* The address of the byte to read */
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	/* Repeated start, then device select in read mode (0xa1) */
	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}


