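/*
 * tlan.c -- driver for TI ThunderLAN based ethernet PCI adapters
 *
 * Maintainer: Samuel Chessman <chessman@tux.org>
 * Released under the GPL (see MODULE_LICENSE below).
 *
 * Supports the Compaq Netelligent/NetFlex PCI boards, the Olicom
 * OC-2183/2325/2326 boards, and the NetFlex-3/E EISA boards.
 * Per-board settings can be given at module load time, one value
 * per adapter, for example:
 *
 *	modprobe tlan speed=100 duplex=2 aui=0
 *
 * where 0 leaves a setting at its (autonegotiated) default.
 */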
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"

static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100BaseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static const struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(struct timer_list *t);
static void tlan_phy_monitor(struct timer_list *t);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
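/*
 * The driver remembers which sk_buff belongs to a list entry by
 * stashing the pointer in the (otherwise unused) buffer[8] and
 * buffer[9] address slots of the tlan_list.  The two 16-bit shifts
 * in tlan_get_skb() reassemble the upper half without the
 * out-of-range "<< 32" a 32-bit build would otherwise see.
 */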
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;

	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};
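/*
 * tlan_set_timer
 *
 * Arms the driver's utility timer for the given purpose.  A timer
 * that is already pending for anything other than LED activity is
 * left alone, so state-machine timers (PHY bring-up, reset
 * completion) are never silently replaced.  The lock is skipped in
 * hard interrupt context, presumably because callers there already
 * hold priv->lock.
 */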
static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}
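/*
 * tlan_remove_one
 *
 * PCI removal callback: unregisters the interface, flushes the
 * deferred tx-timeout work, releases the shared list/buffer DMA
 * memory and the I/O regions, then frees the net_device itself.
 */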
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	/* flush the deferred work before priv (inside dev) goes away */
	cancel_work_sync(&priv->tlan_tqueue);

	if (priv->dma_storage) {
		pci_free_consistent(priv->pci_dev,
				    priv->dma_size, priv->dma_storage,
				    priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	free_netdev(dev);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);
	/* the stats are read here only to clear them; TLAN_IGNORE
	 * discards the values
	 */
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	del_timer_sync(&priv->media_timer);
	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

#ifdef CONFIG_PM

static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int tlan_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

#else

#define tlan_suspend	NULL
#define tlan_resume	NULL

#endif

static struct pci_driver tlan_driver = {
	.name		= "tlan",
	.id_table	= tlan_pci_tbl,
	.probe		= tlan_init_one,
	.remove		= tlan_remove_one,
	.suspend	= tlan_suspend,
	.resume		= tlan_resume,
};

static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);

	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}

static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}
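/*
 * tlan_probe1
 *
 * Common back end for PCI and EISA probing.  For PCI, the caller
 * passes the pci_dev and the ioaddr/irq arguments are ignored (the
 * BARs and pdev->irq are used instead); for EISA, pdev is NULL and
 * ioaddr/irq describe the slot that was found.  On success the
 * device is fully initialized and registered.
 *
 * Returns 0 on success, a negative errno otherwise.
 */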
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		/* only a PCI device has a parent device to hang off of */
		SET_NETDEV_DEV(dev, &pdev->dev);

		priv->adapter = &board_info[ent->driver_data];

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {	/* EISA card */
		device_id = inw(ioaddr + EISA_ID2);
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];	/* NetFlex-3/E */
			priv->adapter_rev = 23;			/* TLAN 2.3 */
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;			/* TLAN 1.0 */
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	/* kernel parameters, either encoded in mem_start or given
	 * as module options
	 */
	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	/* used when an adapter error is reported from within the
	 * irq handler
	 */
	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	/* pdev is NULL if this is an EISA device */
	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
			    priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
err_out:
#endif
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}

static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}

static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}

/* module loading/unloading */
module_init(tlan_probe);
module_exit(tlan_exit);
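/*
 * tlan_eisa_probe
 *
 * Scans the EISA slot I/O ranges (0x1000 through 0x8000 in steps of
 * 0x1000) for the expected vendor ID word (0x110E) and one of the
 * two supported device IDs, reads the IRQ assignment from the slot
 * configuration register, and hands any match to tlan_probe1().
 */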
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	/* Loop through all slots of the EISA bus */
	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* check if adapter is enabled */
		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		/* get irq from board */
		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* set up the newly found EISA adapter */
		tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;

	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open		= tlan_open,
	.ndo_stop		= tlan_close,
	.ndo_start_xmit		= tlan_start_tx,
	.ndo_tx_timeout		= tlan_tx_timeout,
	.ndo_get_stats		= tlan_get_stats,
	.ndo_set_rx_mode	= tlan_set_multicast_list,
	.ndo_do_ioctl		= tlan_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tlan_poll,
#endif
};

static void tlan_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct tlan_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	if (priv->pci_dev)
		strlcpy(info->bus_info, pci_name(priv->pci_dev),
			sizeof(info->bus_info));
	else
		strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
}

static int tlan_get_eeprom_len(struct net_device *dev)
{
	return TLAN_EEPROM_SIZE;
}

static int tlan_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;

	for (i = 0; i < TLAN_EEPROM_SIZE; i++)
		if (tlan_ee_read_byte(dev, i, &data[i]))
			return -EIO;

	return 0;
}

static const struct ethtool_ops tlan_ethtool_ops = {
	.get_drvinfo	= tlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= tlan_get_eeprom_len,
	.get_eeprom	= tlan_get_eeprom,
};
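/*
 * tlan_init
 *
 * Allocates the coherent DMA block that holds the RX and TX list
 * rings, reads the MAC address out of the EEPROM (byte-swapping it
 * for the Olicom boards that store it word-swapped at offset 0xf8),
 * and wires up the netdev and ethtool operations.
 */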
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	memset(priv->dma_storage, 0, dma_size);
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < ETH_ALEN; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	/* Olicom boards store the address byte-swapped in the EEPROM */
	if (priv->adapter->addr_ofs == 0xf8) {
		for (i = 0; i < ETH_ALEN; i += 2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i + 1];
			dev->dev_addr[i + 1] = tmp;
		}
	}

	netif_carrier_off(dev);

	/* device methods */
	dev->netdev_ops = &tlan_netdev_ops;
	dev->ethtool_ops = &tlan_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}
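/*
 * tlan_open
 *
 * Brings the interface up: requests the (shared) IRQ, prepares the
 * utility and PHY-monitor timers, and kicks off the adapter reset
 * sequence via tlan_start().
 */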
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	timer_setup(&priv->timer, NULL, 0);
	timer_setup(&priv->media_timer, tlan_phy_monitor, 0);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}
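/*
 * tlan_ioctl
 *
 * Implements the standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG) on top of the driver's own MII access helpers.
 * Fails with -EAGAIN until the PHY has been brought online.
 */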
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:	/* get address of MII PHY in use */
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:	/* read MII PHY register */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:	/* write MII PHY register */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
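/*
 * tlan_tx_timeout
 *
 * Watchdog handler: tears down and rebuilds both list rings and
 * resets the adapter, then reopens the queue.  The same recovery is
 * reachable through the tlan_tqueue work item so an adapter-error
 * interrupt can schedule it from atomic context.
 */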
static void tlan_tx_timeout(struct net_device *dev)
{
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	/* rebuild both rings and reset the adapter */
	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev);
}
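/*
 * tlan_start_tx
 *
 * Maps the frame for DMA and fills in the TX list at tx_tail.  If
 * the channel is idle, the list address is written to CH_PARM and a
 * GO command is issued; otherwise the new entry is chained onto the
 * previous list's forward pointer and the chip picks it up on the
 * next TX EOC.  Returns NETDEV_TX_BUSY only when the ring is full.
 */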
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}
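/*
 * tlan_handle_interrupt
 *
 * Reads the interrupt type from HOST_INT and dispatches it through
 * the tlan_int_vector table.  The handler's return value is the
 * acknowledgement count, which is combined with the type into a
 * single ACK command so the chip can raise the next interrupt.
 */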
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}

static int tlan_close(struct net_device *dev)
{
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}
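/*
 * tlan_get_stats
 *
 * Folds the chip's on-board statistics registers into dev->stats
 * and, when the corresponding debug bits are set, dumps the DIO
 * registers, PHY registers, and both list rings.
 */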
static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}
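/*
 * tlan_set_multicast_list
 *
 * Promiscuous mode sets the CAF bit; otherwise the first three
 * multicast addresses go into AREG1-AREG3 and the rest are folded
 * into the two 32-bit hash registers (all-ones for IFF_ALLMULTI).
 */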
static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}
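/*
 * tlan_handle_tx_eof
 *
 * Walks the TX ring from tx_head, unmapping and freeing every frame
 * the chip has marked complete, counting each toward the interrupt
 * acknowledgement.  On an end-of-channel it either restarts the
 * channel at the next ready list or records that transmission is
 * idle.
 */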
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}
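/*
 * tlan_handle_rx_eof
 *
 * For every completed RX list: a replacement buffer is allocated
 * first, and only then is the filled sk_buff unmapped and passed to
 * netif_rx(); if the allocation fails the old buffer is recycled
 * and the frame is dropped.  The serviced list is re-armed and
 * chained back onto the tail of the RX ring.
 */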
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		pci_unmap_single(priv->pci_dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			pci_map_single(priv->pci_dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}

static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}
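/*
 * tlan_handle_status_check
 *
 * Two jobs share this vector: adapter-check interrupts (an error
 * code waits in CH_PARM; the adapter is reset and recovery work is
 * scheduled) and network status interrupts, where a ThunderLAN PHY
 * reporting reversed 10BaseT polarity gets its SWAPOL bit toggled
 * to correct it.
 */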
static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adapter Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;
}

static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}
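/*
 * tlan_timer
 *
 * Single utility timer shared by the PHY/reset state machine; the
 * action taken is selected by priv->timer_type.  The ACTIVITY case
 * debounces the LED: if the last activity was too recent, the timer
 * re-arms itself instead of turning the activity LED off.
 */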
static void tlan_timer(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, timer);
	struct net_device *dev = priv->dev;
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}
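/*
 * tlan_reset_lists
 *
 * Re-initializes both rings: TX lists are marked unused, RX lists
 * get a fresh receive buffer, are marked ready, and are chained
 * together through their forward pointers, with the final forward
 * pointer zeroed to terminate the chain.
 */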
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb)
			break;

		list->buffer[0].address = pci_map_single(priv->pci_dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	/* in case RX buffer allocation failed partway, clear the rest */
	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}

static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(
				priv->pci_dev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(priv->pci_dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}

static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off.  +0         +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x  0x%08x 0x%08x\n", i, data0, data1);
	}
}

static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info("   Forward    = 0x%08x\n", list->forward);
	pr_info("   CSTAT      = 0x%04hx\n", list->c_stat);
	pr_info("   Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info("   Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}
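/*
 * tlan_read_and_clear_stats
 *
 * Reads the chip's statistics registers, which clear on read, and
 * accumulates them into dev->stats when "record" is TLAN_RECORD;
 * with TLAN_IGNORE the read serves only to zero the counters.
 */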
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}
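/*
 * tlan_reset_adapter
 *
 * Performs the soft-reset sequence: reset the adapter, mask
 * interrupts, clear the address and hash registers, program the
 * network configuration, load the interrupt timer and threshold,
 * release MII reset, and finally detect and power-cycle the PHY
 * (unmanaged PHYs skip straight to tlan_finish_reset()).
 */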
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	/* 1. Assert reset bit. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	/* 2. Turn off interrupts. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	/* 3. Clear AREGs and HASHs. */
	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	/* 4. Set up the NetConfig register. */
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	/* 5. Load Ld_Tmr and Ld_Thr. */
	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	/* 7. Set up the remaining registers. */
	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	/* don't power down the internal PHY if we're going to use it */
	if (priv->phy_num == 0 ||
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}
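/*
 * tlan_finish_reset
 *
 * Completes the reset once the PHY has settled: checks (or forces)
 * link, prints the negotiated speed/duplex and partner abilities,
 * enables interrupts, and starts the receive channel.  Without link
 * it re-arms itself to try again in ten seconds.
 */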
static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if (status & MII_GS_LINK) {
			/* link info is only reported for Nat. Sem. PHYs */
			if ((tlphy_id1 == NAT_SEM_ID1) &&
			    (tlphy_id2 == NAT_SEM_ID2)) {
				tlan_mii_read_reg(dev, phy, MII_AN_LPA,
						  &partner);
				tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
						  &tlphy_par);

				netdev_info(dev,
					    "Link active, %s %uMbps %s-Duplex\n",
					    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
					    ? "forced" : "Autonegotiation enabled,",
					    tlphy_par & TLAN_PHY_SPEED_100
					    ? 100 : 10,
					    tlphy_par & TLAN_PHY_DUPLEX_FULL
					    ? "Full" : "Half");

				if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
					netdev_info(dev, "Partner capability:");
					for (i = 5; i < 10; i++)
						if (partner & (1 << i))
							pr_cont(" %s",
								media[i-5]);
					pr_cont("\n");
				}
			} else
				netdev_info(dev, "Link active\n");
			/* enable link beat monitoring */
			priv->media_timer.expires = jiffies + HZ;
			add_timer(&priv->media_timer);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}
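/*
 * tlan_set_mac
 *
 * Writes a six-byte station address into adapter address register
 * "areg" (0 is the station address, 1-3 hold multicast addresses);
 * a NULL mac clears the register.
 */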
static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}

static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info("   Off.  +0     +1     +2     +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info("   0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}
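/*
 * tlan_phy_detect
 *
 * The ThunderLAN's internal PHY sits at TLAN_PHY_MAX_ADDR, so that
 * address is checked first for phy[0]; the lower addresses are then
 * scanned for an external PHY (phy[1]), which is preferred when
 * present.
 */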
static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}

static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
		/* if using the internal PHY, just isolate the external one */
		if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
			value = MII_GC_ISOLATE;
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	/* wait 50 ms for the transceiver to settle, then power it back up */
	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
}

static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);

	/* wait 500 ms and then reset the transceiver */
	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
}

static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 value;
	unsigned long timeout = jiffies + HZ;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	do {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
		if (time_after(jiffies, timeout)) {
			netdev_err(dev, "PHY reset timeout\n");
			return;
		}
	} while (value & MII_GC_RESET);

	/* wait 50 ms and then start the link bring-up */
	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
}
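/*
 * tlan_phy_start_link
 *
 * If the PHY supports autonegotiation and no speed/duplex was
 * forced, the advertisement register is programmed from the PHY's
 * own status bits and autonegotiation is restarted; otherwise the
 * control register (and, on the internal PHY, AUI selection) is set
 * up directly and the reset is finished after a fixed delay.
 */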
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {

			/* set autonegotiation advertisement */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* enable autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* restart autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			/* let autonegotiation run for 2 seconds before
			 * checking on it
			 */
			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}
	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* switch to the internal PHY for the AUI port */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* give the transceiver time to establish link */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);

}

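/***************************************************************
 *	tlan_phy_finish_auto_neg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of the autonegotiating
 *			device.
 *
 *	This function checks whether autonegotiation has completed.
 *	If not, it reschedules itself; otherwise it records the
 *	negotiated duplex mode and finishes the reset sequence.
 *
 **************************************************************/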
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* some status bits are latched; read twice to get the
	 * current state
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* autonegotiation has not completed yet; give the
		 * process another 2 seconds
		 */
		tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	/* the common abilities are the AND of what we advertised
	 * and what the link partner advertised
	 */
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	/* switch to the internal PHY for 10 Mbps operation */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* wait for 100 ms and finish the reset */
	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);

}

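/***************************************************************
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		t	The timer that triggered this function.
 *
 *	This function monitors PHY condition by reading the MII
 *	status register, drives the link LED, and notifies the
 *	kernel of link state changes.  On adapters with the
 *	internal 10 Mbps PHY it also switches back to the external
 *	PHY when the link is lost.
 *
 **************************************************************/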
static void tlan_phy_monitor(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, media_timer);
	struct net_device *dev = priv->dev;
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
			netif_carrier_off(dev);
			if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
				/* power down the internal (10 Mbps) PHY */
				u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
					   MII_GC_ISOLATE;

				tlan_mii_sync(dev->base_addr);
				tlan_mii_write_reg(dev, priv->phy[0],
						   MII_GEN_CTL, data);
				/* switch back to the external PHY */
				priv->phy_num = 1;
				/* and restart the link-up sequence */
				tlan_set_timer(dev, msecs_to_jiffies(400),
					       TLAN_TIMER_PHY_PDOWN);
				return;
			}
		}
	}

	/* link reestablished? */
	if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}
	priv->media_timer.expires = jiffies + HZ;
	add_timer(&priv->media_timer);
}

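/***************************************************************
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		false	if ack received ok
 *		true	if no ack received or other error
 *	Parms:
 *		dev	The device structure containing the io
 *			address for this device.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be
 *			retrieved.
 *		val	A pointer to a variable to store the
 *			retrieved value.
 *
 *	This function uses the TLAN's MII bus to retrieve the
 *	contents of a given register on a PHY.  It is assumed
 *	that the PHY is already enabled.
 *
 **************************************************************/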
static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* mask off MII interrupts while bit-banging the bus */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
	if (nack) {					/* no ACK, so fake it */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {					/* ACK, so read data */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}

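/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *		data		The data to be sent on the MII bus.
 *		num_bits	The number of bits of data to send,
 *				most significant bit first.
 *
 *	This function clocks out up to 32 bits of data on the MII
 *	serial bus.
 *
 **************************************************************/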
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	/* clock each bit out, MSB first; the dummy reads of MCLK
	 * delay long enough to satisfy the bus timing
	 */
	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}

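/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *
 *	This function syncs all PHYs in terms of the MII bus by
 *	sending 32 clock cycles with the transmitter disabled.
 *
 **************************************************************/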
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}

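/***************************************************************
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure for the device to
 *			write to.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be
 *			written.
 *		val	The value to be written to the register.
 *
 *	This function uses the TLAN's MII bus to write the contents
 *	of a given register on a PHY.  It is assumed that the PHY
 *	is already enabled.
 *
 **************************************************************/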
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	/* mask off MII interrupts while bit-banging the bus */
	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* send ACK */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}

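/*****************************************************************************
******************************************************************************

ThunderLAN driver eeprom routines

The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
EEPROM.  These functions are based on information in Microchip's
data sheet; how well they work with other EEPROMs is unknown.

******************************************************************************
*****************************************************************************/


/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base	The IO port base address for this TLAN
 *			adapter.
 *
 *	This function sends a start cycle to an EEPROM attached
 *	to a TLAN chip.
 *
 **************************************************************/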
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* a start cycle is data going low while the clock is high */
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}

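/***************************************************************
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		If the correct ack was received, 0, otherwise 1.
 *	Parms:
 *		io_base	The IO port base address for this TLAN
 *			adapter.
 *		data	The 8 bits of information to send to the
 *			EEPROM.
 *		stop	If TLAN_EEPROM_STOP is passed, a stop
 *			cycle is sent after the data.
 *
 *	This function sends a byte on the serial EEPROM line,
 *	driving the clock to send each bit.  It then reverses the
 *	data line direction and reads the acknowledge bit.
 *
 **************************************************************/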
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* assume clock is low, tx is enabled; send the byte MSB first */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	/* release the data line and clock in the EEPROM's ack bit */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* stop cycle: raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}

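/***************************************************************
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base	The IO port base address for this TLAN
 *			adapter.
 *		data	An address to a char to hold the data
 *			received from the EEPROM.
 *		stop	If TLAN_EEPROM_STOP is passed, no ack is
 *			sent and a stop cycle follows the data.
 *
 *	This function receives 8 bits of data from the EEPROM over
 *	the serial link.  It then sends an ack bit, or no ack and
 *	a stop cycle.  It is used to retrieve data after the
 *	address of a byte has been sent.
 *
 **************************************************************/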
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* release the data line and clock in the byte, MSB first */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);	/* ack = 0 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);		/* no ack = 1 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
		/* stop cycle: raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}

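/***************************************************************
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		No error = 0, otherwise the stage at which the
 *		function failed.
 *	Parms:
 *		dev	The device structure for the TLAN device.
 *		ee_addr	The address of the byte in the EEPROM
 *			whose contents are to be retrieved.
 *		data	An address to a char to hold the data
 *			obtained from the EEPROM.
 *
 *	This function reads a byte of information from a byte
 *	cell in the EEPROM.
 *
 **************************************************************/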
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	/* 0xa0 is the EEPROM's device address with the write bit */
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	tlan_ee_send_start(dev->base_addr);
	/* 0xa1 is the device address with the read bit set */
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}