/*
 * tlan.c -- driver for TI ThunderLAN based ethernet PCI and EISA adapters
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"
static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
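/*
 * Example usage (hypothetical values): the parameters above are
 * per-board arrays, so e.g. "modprobe tlan speed=100,10 duplex=2,1"
 * would force the first board to 100 Mbps full duplex and the second
 * to 10 Mbps half duplex.
 */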
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100BaseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};
static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};
static const struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(struct timer_list *t);
static void tlan_phy_monitor(struct timer_list *t);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
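/*
 * Each TLAN list entry carries ten buffer descriptor slots; only the
 * first ones are used for single-fragment frames, so the driver stashes
 * the owning sk_buff pointer in slots 8 and 9 (split into high and low
 * 32-bit halves so it also works on 64-bit systems).
 */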
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}
static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};
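/*
 * tlan_set_timer
 *
 * Arms the driver's private timer for "ticks" jiffies from now, tagged
 * with the given type.  A pending timer is only replaced when it is an
 * activity-LED timer; the lock is skipped in hard-IRQ context because
 * the interrupt handler already holds it there.
 */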
static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}
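/*
 * tlan_remove_one
 *
 * PCI removal callback: unregisters the netdev, frees the coherent DMA
 * list storage, releases the I/O regions and frees the device.
 */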
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	if (priv->dma_storage) {
		pci_free_consistent(priv->pci_dev,
				    priv->dma_size, priv->dma_storage,
				    priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	/* the work item references priv, so flush it before freeing dev */
	cancel_work_sync(&priv->tlan_tqueue);

	free_netdev(dev);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	del_timer_sync(&priv->media_timer);
	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}
#ifdef CONFIG_PM

static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int tlan_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

#else

#define tlan_suspend	NULL
#define tlan_resume	NULL

#endif


static struct pci_driver tlan_driver = {
	.name		= "tlan",
	.id_table	= tlan_pci_tbl,
	.probe		= tlan_init_one,
	.remove		= tlan_remove_one,
	.suspend	= tlan_suspend,
	.resume		= tlan_resume,
};
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);

	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}


static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}
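/*
 * tlan_probe1
 *
 * Probes one TLAN adapter, either PCI (pdev != NULL; ioaddr and irq are
 * taken from the PCI device) or EISA (pdev == NULL; ioaddr and irq are
 * supplied by the caller).  Allocates and registers the netdev and
 * reads the per-board configuration.  Returns 0 on success or a
 * negative errno.
 */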
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {
		device_id = inw(ioaddr + EISA_ID2);
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
			    priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
err_out:
#endif
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}
static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}


static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}


module_init(tlan_probe);
module_exit(tlan_exit);
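/*
 * tlan_eisa_probe
 *
 * Scans EISA slot I/O space (0x1000-0x8000 in 0x1000 steps) for boards
 * carrying the TLAN EISA ID, decodes the IRQ from the configuration
 * register, and hands each enabled card to tlan_probe1().
 */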
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int rc = -ENODEV;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	/* loop through all slots of the EISA bus */
	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* check if adapter is enabled */
		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		/* get the IRQ from the board */
		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* set up the newly found EISA adapter */
		rc = tlan_probe1(NULL, ioaddr, irq,
				 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;

	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open		= tlan_open,
	.ndo_stop		= tlan_close,
	.ndo_start_xmit		= tlan_start_tx,
	.ndo_tx_timeout		= tlan_tx_timeout,
	.ndo_get_stats		= tlan_get_stats,
	.ndo_set_rx_mode	= tlan_set_multicast_list,
	.ndo_do_ioctl		= tlan_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tlan_poll,
#endif
};

static void tlan_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct tlan_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	if (priv->pci_dev)
		strlcpy(info->bus_info, pci_name(priv->pci_dev),
			sizeof(info->bus_info));
	else
		strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
}

static int tlan_get_eeprom_len(struct net_device *dev)
{
	return TLAN_EEPROM_SIZE;
}

static int tlan_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;

	for (i = 0; i < TLAN_EEPROM_SIZE; i++)
		if (tlan_ee_read_byte(dev, i, &data[i]))
			return -EIO;

	return 0;
}

static const struct ethtool_ops tlan_ethtool_ops = {
	.get_drvinfo	= tlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= tlan_get_eeprom_len,
	.get_eeprom	= tlan_get_eeprom,
};
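/*
 * tlan_init
 *
 * Allocates the coherent DMA block that backs the RX and TX list rings,
 * reads the station MAC address from the EEPROM (byte-swapping it on
 * boards whose address offset is 0xf8, i.e. the Olicom adapters), and
 * wires up the netdev and ethtool operations.  Returns 0 or -ENOMEM.
 */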
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < ETH_ALEN; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}

	if (priv->adapter->addr_ofs == 0xf8) {
		for (i = 0; i < ETH_ALEN; i += 2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i + 1];
			dev->dev_addr[i + 1] = tmp;
		}
	}

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->ethtool_ops = &tlan_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}
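/*
 * tlan_open
 *
 * Requests the (shared) IRQ, initializes the private and media timers,
 * and starts the adapter.  Returns 0, or the error from request_irq().
 */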
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	timer_setup(&priv->timer, NULL, 0);
	timer_setup(&priv->media_timer, tlan_phy_monitor, 0);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}
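/*
 * tlan_ioctl
 *
 * Implements the MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) on
 * top of the driver's own MII access routines.  Fails with -EAGAIN
 * until the PHY has been brought online.
 */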
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:	/* get address of MII PHY in use */
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:	/* read MII PHY register */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:	/* write MII PHY register */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
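/*
 * tlan_tx_timeout
 *
 * netdev watchdog handler: tears down and rebuilds the RX/TX rings,
 * resets the adapter and restarts the queue.
 */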
static void tlan_tx_timeout(struct net_device *dev)
{
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	/* free everything, reset the adapter and restart the queue */
	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}


/* deferred transmit-timeout handling, run in process context */
static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev);
}
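/*
 * tlan_start_tx
 *
 * Queues one frame on the TX list ring.  The frame is padded to the
 * minimum TLAN frame size, mapped for DMA, and either starts a new TX
 * channel (GO command) or is chained onto the one in progress.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is full.
 */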
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}
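/*
 * tlan_handle_interrupt
 *
 * IRQ handler: decodes the interrupt type from TLAN_HOST_INT, dispatches
 * it through tlan_int_vector[], and acknowledges the interrupt with the
 * value the sub-handler returns.
 */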
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}
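/*
 * tlan_close
 *
 * Stops the adapter, releases the IRQ and frees the RX/TX ring buffers.
 */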
static int tlan_close(struct net_device *dev)
{
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}
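/*
 * tlan_get_stats
 *
 * Folds the chip's statistics registers into dev->stats and, at higher
 * debug levels, dumps the DIO registers, the PHY registers and the
 * RX/TX lists.
 */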
static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}
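/*
 * tlan_set_multicast_list
 *
 * Programs the RX filter: promiscuous mode sets the copy-all-frames
 * bit, IFF_ALLMULTI opens both hash registers, and otherwise the first
 * three multicast addresses go into the spare address registers with
 * the rest hashed into TLAN_HASH_1/TLAN_HASH_2.
 */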
static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}
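/*
 * tlan_handle_tx_eof
 *
 * Completes transmitted frames: unmaps and frees each finished skb,
 * restarts the TX channel if an end-of-channel was latched while more
 * frames are ready, and pulses the activity LED.
 */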
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}
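/*
 * tlan_handle_stat_overflow
 *
 * A statistics register is about to overflow, so read and fold all of
 * them into dev->stats (reading also clears them).
 */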
static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}
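/*
 * tlan_handle_rx_eof
 *
 * Passes completed receive frames to the stack.  Each filled buffer is
 * replaced with a freshly allocated skb (on allocation failure the old
 * entry is reused and the frame dropped), every processed entry is
 * re-chained at the tail, and the RX channel is restarted on
 * end-of-channel.
 */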
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		pci_unmap_single(priv->pci_dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			pci_map_single(priv->pci_dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}
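/*
 * tlan_handle_dummy
 *
 * Acknowledges the self-test ("dummy") interrupt the driver requests
 * when debugging is enabled.
 */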
static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}
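/*
 * tlan_handle_tx_eoc
 *
 * On pre-3.0 TLAN silicon the TX end-of-channel interrupt is not
 * masked, so if more frames are ready the channel is pointed at the
 * current head and restarted with a GO command.
 */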
static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}
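/*
 * tlan_handle_status_check
 *
 * Handles adapter-check and network-status interrupts: an adapter
 * check resets the chip and defers recovery to the timeout work queue,
 * while a PHY management interrupt on the internal PHY checks and, if
 * necessary, swaps the 10BaseT polarity.
 */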
static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;
}
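/*
 * tlan_handle_rx_eoc
 *
 * On pre-3.0 TLAN silicon, restarts the RX channel at the current head
 * of the RX ring when the chip runs off the end of the list.
 */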
static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}
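/*
 * tlan_timer
 *
 * Single multiplexed driver timer: priv->timer_type selects the action
 * (PHY power-down/up, PHY reset, link start, autonegotiation finish,
 * reset completion, or turning the activity LED back to link-only once
 * the activity delay has elapsed).
 */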
static void tlan_timer(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, timer);
	struct net_device *dev = priv->dev;
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}
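/*
 * tlan_reset_lists
 *
 * Re-initializes the TX ring to the unused state and the RX ring to a
 * fully populated, chained list of receive buffers; entries the
 * allocator could not fill are left without an skb.
 */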
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb)
			break;

		list->buffer[0].address = pci_map_single(priv->pci_dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	/* in case we ran out of memory, clear skb pointers on the rest */
	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}


static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(
				priv->pci_dev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(priv->pci_dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}
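/*
 * tlan_print_dio
 *
 * Debug helper: dumps the first 0x4C bytes of the chip's internal
 * (DIO-mapped) registers for the given I/O base.
 */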
static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off. +0 +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
	}
}
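/*
 * tlan_print_list
 *
 * Debug helper: prints one RX or TX list entry (forward pointer, CSTAT,
 * frame size and the first two buffer descriptors).
 */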
static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info("   Forward    = 0x%08x\n", list->forward);
	pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
	pr_info("   Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}
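/*
 * tlan_read_and_clear_stats
 *
 * Reads the chip's statistics registers, which clears them, and when
 * "record" is TLAN_RECORD folds the counts into dev->stats.
 */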
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	/* TX good frames (24 bits) and TX underruns (8 bits) */
	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	/* RX good frames (24 bits) and RX overruns (8 bits) */
	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	/* deferred TX (16 bits), CRC errors and code errors (8 bits each) */
	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	/* multiple-collision and single-collision frames (16 bits each) */
	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	/* excess collisions, late collisions, carrier loss (8 bits each) */
	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}
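/*
 * tlan_reset_adapter
 *
 * Puts the chip through a software reset: asserts the adapter-reset
 * bit, masks interrupts, clears the address and hash registers, reloads
 * the interrupt timer and threshold, releases the MII from reset,
 * detects the PHY, and programs the network configuration before
 * handing off to tlan_finish_reset() or powering the PHY down first.
 */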
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	/* 1. Assert reset bit. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	/* 2. Turn off interrupts. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	/* 3. Clear AREGs and HASHs. */
	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	/* 4. Setup NetConfig register. */
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	/* 7. Setup the remaining registers. */
	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	/* enable internal PHY if it is in use (or may be used as fallback) */
	if (priv->phy_num == 0 ||
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}
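/*
 * tlan_finish_reset
 *
 * Completes a reset once the PHY is usable: programs the MAC command
 * and mask registers, checks (or forces) link, reports the negotiated
 * mode, enables interrupts and restarts the RX channel.  If the link
 * is down it re-arms itself to retry in 10 seconds.
 */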
static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if (status & MII_GS_LINK) {
			/* link details are only reported for Nat. Sem. PHYs */
			if ((tlphy_id1 == NAT_SEM_ID1) &&
			    (tlphy_id2 == NAT_SEM_ID2)) {
				tlan_mii_read_reg(dev, phy, MII_AN_LPA,
						  &partner);
				tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
						  &tlphy_par);

				netdev_info(dev,
					    "Link active, %s %uMbps %s-Duplex\n",
					    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
					    ? "forced" : "Autonegotiation enabled,",
					    tlphy_par & TLAN_PHY_SPEED_100
					    ? 100 : 10,
					    tlphy_par & TLAN_PHY_DUPLEX_FULL
					    ? "Full" : "Half");

				if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
					netdev_info(dev, "Partner capability:");
					for (i = 5; i < 10; i++)
						if (partner & (1 << i))
							pr_cont(" %s",
								media[i-5]);
					pr_cont("\n");
				}
			} else
				netdev_info(dev, "Link active\n");
			/* enable link beat monitoring */
			priv->media_timer.expires = jiffies + HZ;
			add_timer(&priv->media_timer);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}
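/*
 * tlan_set_mac
 *
 * Writes a 6-byte MAC address into address register "areg" (0-3) of the
 * chip, or clears the register when mac is NULL.
 */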
static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}
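/*
 * tlan_phy_print
 *
 * Debug helper: dumps the first 0x20 MII registers of the PHY currently
 * in use, unless the adapter has an unmanaged PHY.
 */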
static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info("   Off.  +0     +1     +2     +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}
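/*
 * tlan_phy_detect
 *
 * Scans the MII addresses for PHYs: the internal PHY is expected at
 * TLAN_PHY_MAX_ADDR and any external PHY at a lower address.  An
 * external PHY is preferred (priv->phy_num = 1) when one is present.
 */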
static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}
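/*
 * tlan_phy_power_down
 *
 * Powers down and isolates the PHY in use (only isolating, not powering
 * down, a second PHY where it may still be needed), then schedules the
 * power-up step 50 ms later.
 */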
static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
		/* on adapters that fall back to the internal 10BaseT PHY,
		 * only isolate the other PHY instead of powering it down
		 */
		if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
			value = MII_GC_ISOLATE;
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	/* wait 50 ms, then power the PHY back up */
	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
}
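/*
 * tlan_phy_power_up
 *
 * Takes the PHY out of power-down (leaving loopback set) and schedules
 * the PHY reset step 500 ms later.
 */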
static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);

	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
}
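/*
 * tlan_phy_reset
 *
 * Issues an MII reset and polls for up to a second until the reset bit
 * clears, then schedules the link start-up step 50 ms later.
 */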
static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 value;
	unsigned long timeout = jiffies + HZ;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	do {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
		if (time_after(jiffies, timeout)) {
			netdev_err(dev, "PHY reset timeout\n");
			return;
		}
	} while (value & MII_GC_RESET);

	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
}
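/*
 * tlan_phy_start_link
 *
 * Begins the link bring-up: forces speed/duplex when the module
 * parameters request it, otherwise advertises the PHY's abilities and
 * restarts autonegotiation; handles the AUI and internal-PHY special
 * cases and arms the appropriate follow-up timer.
 */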
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {
			/* advertise everything the PHY can do... */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* ...enable autonegotiation... */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* ...and restart it */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* AUI operation needs the internal PHY; switch to it */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* give the link time to come up before finishing the reset */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
}
2680
2681
2682
2683
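/* tlan_phy_finish_auto_neg
 *
 * Polls for autonegotiation completion, rescheduling itself every
 * two seconds until the PHY reports done.  The common denominator of
 * the local (MII_AN_ADV) and link partner (MII_AN_LPA) abilities
 * then determines duplex: bit 0x0100 is 100BaseTx-FD and bit 0x0040
 * is 10BaseT-FD.  If no 100 Mbps mode was negotiated on an adapter
 * flagged TLAN_ADAPTER_USE_INTERN_10, the driver falls back to the
 * internal 10BaseT PHY.
 */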
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);

}


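/* tlan_phy_monitor
 *
 * Timer callback that samples PHY link status once per second and
 * keeps the carrier state and activity LED in sync with it.  On link
 * loss, adapters flagged TLAN_ADAPTER_USE_INTERN_10 power down the
 * internal PHY and fail over to the external one by restarting the
 * power-down/power-up/reset sequence.
 */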
static void tlan_phy_monitor(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, media_timer);
	struct net_device *dev = priv->dev;
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	if (!(phy_status & MII_GS_LINK)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
			netif_carrier_off(dev);
			if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
				u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
					   MII_GC_ISOLATE;

				tlan_mii_sync(dev->base_addr);
				tlan_mii_write_reg(dev, priv->phy[0],
						   MII_GEN_CTL, data);
				priv->phy_num = 1;
				tlan_set_timer(dev, msecs_to_jiffies(400),
					       TLAN_TIMER_PHY_PDOWN);
				return;
			}
		}
	}

	if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}
	priv->media_timer.expires = jiffies + HZ;
	add_timer(&priv->media_timer);
}


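/* tlan_mii_read_reg
 *
 * Bit-bangs an IEEE 802.3 clause 22 read frame over the TLAN NetSio
 * register and stores the 16-bit result in *val.  Returns true (and
 * stores 0xffff) if the PHY does not acknowledge the frame.  MII
 * interrupts (MINTEN) are suspended for the duration, and the
 * private lock is taken unless the caller is already in interrupt
 * context.
 */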
static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

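	/* Clause 22 read frame: start (01), read opcode (10), 5-bit
	 * PHY address, 5-bit register address; then release MDIO so
	 * the PHY can drive the turnaround and data bits.
	 */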
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, 0x2, 2);
	tlan_mii_send_data(dev->base_addr, phy, 5);
	tlan_mii_send_data(dev->base_addr, reg, 5);

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	if (nack) {
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}


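/* tlan_mii_send_data
 *
 * Clocks out the low num_bits bits of data, MSB first, on the MDIO
 * line with the transmitter enabled.  The throwaway tlan_get_bit()
 * reads after each clock edge appear to serve only as I/O delays.
 */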
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}


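/* tlan_mii_sync
 *
 * Releases the MDIO line and issues 32 clock cycles, bringing the
 * PHY's serial management interface to a known state before a frame
 * is sent.
 */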
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}


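/* tlan_mii_write_reg
 *
 * Bit-bangs an IEEE 802.3 clause 22 write frame, storing val in the
 * given register of the given PHY.  As with tlan_mii_read_reg(), MII
 * interrupts are suspended and the private lock is taken unless the
 * caller is in interrupt context.
 */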
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

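	/* Clause 22 write frame: start (01), write opcode (01), 5-bit
	 * PHY address, 5-bit register address, turnaround (10), and
	 * the 16 data bits.
	 */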
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, 0x1, 2);
	tlan_mii_send_data(dev->base_addr, phy, 5);
	tlan_mii_send_data(dev->base_addr, reg, 5);

	tlan_mii_send_data(dev->base_addr, 0x2, 2);
	tlan_mii_send_data(dev->base_addr, val, 16);

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}


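/* tlan_ee_send_start
 *
 * Generates an I2C-style start condition on the EEPROM's serial
 * lines: data is pulled low while the clock is high.
 */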
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}


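/* tlan_ee_send_byte
 *
 * Clocks one byte out to the EEPROM, MSB first, then releases the
 * data line for one clock to sample the acknowledge bit.  Returns
 * nonzero if the EEPROM did not acknowledge.  If stop is set and the
 * byte was acknowledged, a stop condition (data rising while the
 * clock is high) is sent afterwards.
 */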
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}


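/* tlan_ee_receive_byte
 *
 * Clocks one byte in from the EEPROM, MSB first.  If stop is clear
 * the byte is acknowledged so the EEPROM keeps sending; otherwise a
 * NACK followed by a stop condition ends the transfer.
 */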
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}


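/* tlan_ee_read_byte
 *
 * Reads the byte at ee_addr from the adapter's serial EEPROM using
 * the usual I2C EEPROM sequence: select the device for writing
 * (0xa0), send the address, restart, then select it for reading
 * (0xa1).  Returns 0 on success, or 1-3 to indicate which phase
 * failed to get an acknowledge.
 */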
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}