#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"

static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
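
/*
 * Example usage (illustrative; each array slot applies to one board,
 * in probe order):
 *
 *	modprobe tlan speed=100,100 duplex=2,2 aui=0,0
 *
 * would request 100 Mbit/s full duplex on the twisted-pair port for
 * the first two adapters found.
 */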

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100BaseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static const struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(struct timer_list *t);
static void tlan_phy_monitor(struct timer_list *t);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
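
/*
 * The chip's buffer descriptors only hold 32-bit fields, so the
 * driver stashes the host-side skb pointer in two otherwise unused
 * slots: the low 32 bits in buffer[9] and the upper bits (on 64-bit
 * hosts) in buffer[8].  tlan_get_skb() reassembles the pointer with
 * a double 16-bit shift so the shift stays defined on 32-bit builds.
 */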
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};
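
/*
 * Arm the driver's general-purpose timer for one of the TLAN_TIMER_*
 * events.  A pending timer is left alone unless it is merely the
 * activity-LED timer; locking is skipped in hard-irq context,
 * presumably because the interrupt path already holds priv->lock.
 */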
static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);

}
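
/*
 * PCI removal callback: tear down in roughly the reverse order of
 * probe.  The deferred tx-timeout work is cancelled before
 * free_netdev() since priv lives inside the netdev allocation.
 */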
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	cancel_work_sync(&priv->tlan_tqueue);

	if (priv->dma_storage) {
		dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
				  priv->dma_storage, priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	free_netdev(dev);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	del_timer_sync(&priv->media_timer);
	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

static int __maybe_unused tlan_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);

	return 0;
}

static int __maybe_unused tlan_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume);

static struct pci_driver tlan_driver = {
	.name = "tlan",
	.id_table = tlan_pci_tbl,
	.probe = tlan_init_one,
	.remove = tlan_remove_one,
	.driver.pm = &tlan_pm_ops,
};
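
/*
 * Module entry point: print the banner, register the PCI driver and
 * scan for EISA adapters; registration is undone if no device of
 * either kind was found.
 */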
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);

	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}


static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}
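
/*
 * Common probe path for both buses.  For PCI, pdev and ent are valid
 * and ioaddr/irq are -1; for EISA, pdev is NULL and the caller passes
 * the I/O base and IRQ it discovered.  The EISA id read from EISA_ID2
 * selects between the two NetFlex-3/E board_info entries.
 */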
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{

	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
			  priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
err_out:
#endif
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}


static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
					  priv->dma_storage,
					  priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}


static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();

}


module_init(tlan_probe);
module_exit(tlan_exit);
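
/*
 * Probe the EISA slot I/O ranges 0x1000-0x8000 for the ThunderLAN
 * signature, check that the card is enabled, decode its IRQ from the
 * configuration register at offset 0xcc0, and hand any match to
 * tlan_probe1().
 */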
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;

	}

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open = tlan_open,
	.ndo_stop = tlan_close,
	.ndo_start_xmit = tlan_start_tx,
	.ndo_tx_timeout = tlan_tx_timeout,
	.ndo_get_stats = tlan_get_stats,
	.ndo_set_rx_mode = tlan_set_multicast_list,
	.ndo_do_ioctl = tlan_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tlan_poll,
#endif
};

static void tlan_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct tlan_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	if (priv->pci_dev)
		strlcpy(info->bus_info, pci_name(priv->pci_dev),
			sizeof(info->bus_info));
	else
		strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
}

static int tlan_get_eeprom_len(struct net_device *dev)
{
	return TLAN_EEPROM_SIZE;
}

static int tlan_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;

	for (i = 0; i < TLAN_EEPROM_SIZE; i++)
		if (tlan_ee_read_byte(dev, i, &data[i]))
			return -EIO;

	return 0;
}

static const struct ethtool_ops tlan_ethtool_ops = {
	.get_drvinfo = tlan_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = tlan_get_eeprom_len,
	.get_eeprom = tlan_get_eeprom,
};
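
/*
 * One-time device setup: allocate a single coherent DMA block for the
 * RX and TX descriptor rings, read the station address out of the
 * EEPROM (byte-swapped on the Olicom boards that use address offset
 * 0xf8), and hook up the netdev and ethtool operations.
 */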
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
					       &priv->dma_storage_dma, GFP_KERNEL);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < ETH_ALEN; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}

	if (priv->adapter->addr_ofs == 0xf8) {
		for (i = 0; i < ETH_ALEN; i += 2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i + 1];
			dev->dev_addr[i + 1] = tmp;
		}
	}

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->ethtool_ops = &tlan_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;

}
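
/*
 * ndo_open handler: grab the (possibly shared) IRQ, set up the two
 * driver timers, and kick off the reset/link bring-up state machine
 * via tlan_start().
 */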
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	timer_setup(&priv->timer, NULL, 0);
	timer_setup(&priv->media_timer, tlan_phy_monitor, 0);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;

}
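
/*
 * ndo_do_ioctl handler: a minimal MII pass-through supporting
 * SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG against the currently
 * selected PHY.
 */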
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = phy;
		fallthrough;

	case SIOCGMIIREG:
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}


static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);

}


static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev, UINT_MAX);
}
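
/*
 * ndo_start_xmit handler: pad the frame to TLAN_MIN_FRAME_SIZE, map
 * it for DMA into the tail TX list, then either start the TX channel
 * with a GO command or, if a transmit is already in progress, chain
 * the new list onto its predecessor's forward pointer.
 */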
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
						      skb->data, txlen,
						      DMA_TO_DEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;

}
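
/*
 * Interrupt handler: the interrupt type encoded in TLAN_HOST_INT
 * indexes the tlan_int_vector dispatch table; a non-zero return from
 * the sub-handler becomes the argument of the TLAN_HC_ACK command.
 */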
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}


static int tlan_close(struct net_device *dev)
{
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;

}


static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;

}


static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}

}
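
/*
 * TX end-of-frame handler: walk the completed TX lists from tx_head,
 * unmapping and freeing each skb (acking at most 255 frames, the
 * width of the host-command ack field), and restart the channel if
 * the chip hit end-of-channel while more lists were ready.
 */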
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		dma_unmap_single(&priv->pci_dev->dev,
				 head_list->buffer[0].address,
				 max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;

}


static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;

}
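
/*
 * RX end-of-frame handler: for each completed RX list, pass the
 * filled skb up the stack and replace it with a freshly mapped one;
 * if the allocation fails, the old buffer is simply requeued and the
 * frame dropped.  Each recycled list is appended at the RX tail.
 */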
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		dma_unmap_single(&priv->pci_dev->dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			dma_map_single(&priv->pci_dev->dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;

}


static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;

}


static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;

}


static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;

}


static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;

}
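
/*
 * General-purpose timer callback: dispatch on priv->timer_type to
 * the matching step of the PHY power/reset/autonegotiation sequence,
 * or dim the activity LED back to link-only once the activity delay
 * has elapsed.
 */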
static void tlan_timer(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, timer);
	struct net_device *dev = priv->dev;
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}

}
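
/*
 * (Re)initialize both descriptor rings: TX lists are marked unused;
 * RX lists are pre-loaded with freshly mapped skbs, chained through
 * their forward pointers and marked ready.  If an skb allocation
 * fails, the remaining RX lists are left empty.
 */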
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb)
			break;

		list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 DMA_FROM_DEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;

}


static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			dma_unmap_single(&priv->pci_dev->dev,
					 list->buffer[0].address,
					 max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			dma_unmap_single(&priv->pci_dev->dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}


static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off. +0 +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
	}

}


static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info(" Forward = 0x%08x\n", list->forward);
	pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
	pr_info(" Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}

}
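
/*
 * Read, and thereby clear, the adapter's statistics registers over
 * DIO.  With TLAN_RECORD the values are folded into dev->stats; with
 * TLAN_IGNORE they are discarded.
 */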
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}

}
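
/*
 * Full adapter reset: pulse TLAN_HC_AD_RST, mask interrupts, clear
 * the address and hash registers, reload the interrupt timer and
 * threshold, release NMRST to the PHYs, then detect PHYs and apply
 * the board-specific bit-rate/AUI setup before finishing the reset
 * directly (unmanaged PHY) or power-cycling the PHY first.
 */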
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	if (priv->phy_num == 0 ||
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);

}


static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if (status & MII_GS_LINK) {

			if ((tlphy_id1 == NAT_SEM_ID1) &&
			    (tlphy_id2 == NAT_SEM_ID2)) {
				tlan_mii_read_reg(dev, phy, MII_AN_LPA,
						  &partner);
				tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
						  &tlphy_par);

				netdev_info(dev,
					    "Link active, %s %uMbps %s-Duplex\n",
					    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
					    ? "forced" : "Autonegotiation enabled,",
					    tlphy_par & TLAN_PHY_SPEED_100
					    ? 100 : 10,
					    tlphy_par & TLAN_PHY_DUPLEX_FULL
					    ? "Full" : "Half");

				if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
					netdev_info(dev, "Partner capability:");
					for (i = 5; i < 10; i++)
						if (partner & (1 << i))
							pr_cont(" %s",
								media[i-5]);
					pr_cont("\n");
				}
			} else
				netdev_info(dev, "Link active\n");

			priv->media_timer.expires = jiffies + HZ;
			add_timer(&priv->media_timer);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);

}
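
/*
 * Program one of the chip's four station-address registers (areg
 * 0-3, each six bytes wide in DIO space); a NULL mac clears the
 * register instead.
 */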
static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}

}


static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info(" Off. +0 +1 +2 +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}

}
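
/*
 * Scan MII addresses 0..TLAN_PHY_MAX_ADDR for responding PHYs.  A
 * device answering at TLAN_PHY_MAX_ADDR (apparently the chip's
 * internal PHY) is recorded in phy[0]; the first hit at any other
 * address is recorded in phy[1] and preferred when present.
 */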
2440static void tlan_phy_detect(struct net_device *dev)
2441{
2442 struct tlan_priv *priv = netdev_priv(dev);
2443 u16 control;
2444 u16 hi;
2445 u16 lo;
2446 u32 phy;
2447
2448 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2449 priv->phy_num = 0xffff;
2450 return;
2451 }
2452
2453 tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
2454
2455 if (hi != 0xffff)
2456 priv->phy[0] = TLAN_PHY_MAX_ADDR;
2457 else
2458 priv->phy[0] = TLAN_PHY_NONE;
2459
2460 priv->phy[1] = TLAN_PHY_NONE;
2461 for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
2462 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
2463 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
2464 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
2465 if ((control != 0xffff) ||
2466 (hi != 0xffff) || (lo != 0xffff)) {
2467 TLAN_DBG(TLAN_DEBUG_GNRL,
2468 "PHY found at %02x %04x %04x %04x\n",
2469 phy, control, hi, lo);
2470 if ((priv->phy[1] == TLAN_PHY_NONE) &&
2471 (phy != TLAN_PHY_MAX_ADDR)) {
2472 priv->phy[1] = phy;
2473 }
2474 }
2475 }
2476
2477 if (priv->phy[1] != TLAN_PHY_NONE)
2478 priv->phy_num = 1;
2479 else if (priv->phy[0] != TLAN_PHY_NONE)
2480 priv->phy_num = 0;
2481 else
2482 netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
2483
2484}
2485
2486
2487
2488
2489static void tlan_phy_power_down(struct net_device *dev)
2490{
2491 struct tlan_priv *priv = netdev_priv(dev);
2492 u16 value;
2493
2494 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
2495 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2496 tlan_mii_sync(dev->base_addr);
2497 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2498 if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
2499
2500 if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
2501 value = MII_GC_ISOLATE;
2502 tlan_mii_sync(dev->base_addr);
2503 tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
2504 }
2505
2506
2507
2508
2509
2510 tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
2511
2512}
2513
2514
2515
2516
2517static void tlan_phy_power_up(struct net_device *dev)
2518{
2519 struct tlan_priv *priv = netdev_priv(dev);
2520 u16 value;
2521
2522 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
2523 tlan_mii_sync(dev->base_addr);
2524 value = MII_GC_LOOPBK;
2525 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2526 tlan_mii_sync(dev->base_addr);
2527
2528
2529
2530
2531 tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
2532
2533}
2534
2535
2536
2537
2538static void tlan_phy_reset(struct net_device *dev)
2539{
2540 struct tlan_priv *priv = netdev_priv(dev);
2541 u16 phy;
2542 u16 value;
2543 unsigned long timeout = jiffies + HZ;
2544
2545 phy = priv->phy[priv->phy_num];
2546
2547 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2548 tlan_mii_sync(dev->base_addr);
2549 value = MII_GC_LOOPBK | MII_GC_RESET;
2550 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
2551 do {
2552 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2553 if (time_after(jiffies, timeout)) {
2554 netdev_err(dev, "PHY reset timeout\n");
2555 return;
2556 }
2557 } while (value & MII_GC_RESET);
2558
2559
2560
2561
2562
2563 tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
2564
2565}
2566
2567
2568
2569
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {
			/* Advertise the abilities reported in the
			 * status register, selector field 1 (802.3).
			 */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* Enable autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* Restart autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			/* Check on the autonegotiation again in
			 * two seconds.
			 */
			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* AUI is only available on the internal PHY, so switch
		 * to it and power the current PHY down.
		 */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* Give the transceiver 4 seconds to establish a link before
	 * finishing the reset.
	 */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);

}


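/***************************************************************
 *	tlan_phy_finish_auto_neg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	the device structure
 *
 *	This function checks whether autonegotiation has completed,
 *	rescheduling itself if not.  On completion it records the
 *	negotiated duplex, switches to the internal 10BaseT PHY when
 *	only 10 Mbps was negotiated on an adapter that has one, and
 *	schedules the end of the reset sequence.
 *
 **************************************************************/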
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* Read the status twice; some bits are latched and only
	 * reflect the current state on the second read.
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* Wait for 2 sec to give the process
		 * time to complete.
		 */
		tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	/* Common abilities: bit 6 = 10baseT-FD, bit 7 = 100baseTx-HD,
	 * bit 8 = 100baseTx-FD.
	 */
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	/* switch to the internal PHY for 10 Mbps */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* Allow 100 ms for things to settle before finishing
	 * the reset.
	 */
	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);

}


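/***************************************************************
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		t	the timer that fired (embedded in tlan_priv)
 *
 *	This timer callback polls the PHY status register once a
 *	second to detect link loss and recovery, updating the carrier
 *	state and the link LED.  On adapters with an internal 10BaseT
 *	PHY it falls back to the external PHY when the link drops.
 *
 **************************************************************/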
static void tlan_phy_monitor(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, media_timer);
	struct net_device *dev = priv->dev;
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* Get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* Check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
			netif_carrier_off(dev);
			if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
				/* power down the internal PHY */
				u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
					   MII_GC_ISOLATE;

				tlan_mii_sync(dev->base_addr);
				tlan_mii_write_reg(dev, priv->phy[0],
						   MII_GEN_CTL, data);
				/* switch to the external PHY */
				priv->phy_num = 1;
				/* restart the power down/up cycle */
				tlan_set_timer(dev, msecs_to_jiffies(400),
					       TLAN_TIMER_PHY_PDOWN);
				return;
			}
		}
	}

	/* Link reestablished? */
	if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}
	priv->media_timer.expires = jiffies + HZ;
	add_timer(&priv->media_timer);
}


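/*****************************************************************************
******************************************************************************

	ThunderLAN driver MII routines

	These routines bit-bang the 802.3 MII serial management protocol
	through the TLAN NetSio register.

******************************************************************************
*****************************************************************************/

/***************************************************************
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		false	if ACK was received ok
 *		true	if no ACK was received
 *	Parms:
 *		dev	the device structure containing the MII bus
 *		phy	address of the PHY to be queried
 *		reg	register whose contents are to be retrieved
 *		val	storage for the retrieved value
 *
 *	This function retrieves the contents of a given register on a
 *	PHY over the TLAN's MII bus.
 *
 **************************************************************/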
static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read  (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #    */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #  */

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* turnaround cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* 0 means the PHY ACKed */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK cycle */
	if (nack) {					/* no ACK, so fake it */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {					/* ACK, so read data */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}


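/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	io address of the device
 *		data		bits to be sent
 *		num_bits	number of bits to send (max 32)
 *
 *	This function clocks the low num_bits of data out on the MII
 *	data line, most significant bit first.
 *
 **************************************************************/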
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	/* The dummy reads of the clock bit act as an I/O delay
	 * after each transition.
	 */
	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}


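/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	io address of the device
 *
 *	This function sends 32 clock cycles with the transmitter
 *	disabled, resynchronizing the PHY's MII state machine.
 *
 **************************************************************/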
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}


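/***************************************************************
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	the device structure containing the MII bus
 *		phy	address of the PHY to be written to
 *		reg	register to be written
 *		val	value to write
 *
 *	This function writes val to the given register on a PHY over
 *	the TLAN's MII bus.
 *
 **************************************************************/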
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #    */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #  */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* turnaround (10b) */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}


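/*****************************************************************************
******************************************************************************

	ThunderLAN driver EEPROM routines

	The EEPROM is a two-wire serial device accessed by bit-banging
	the EDATA/ECLOK/ETXEN bits of the TLAN NetSio register.

******************************************************************************
*****************************************************************************/

/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		io address of the device
 *
 *	This function sends a start condition to the EEPROM: the data
 *	line falls while the clock is high.
 *
 **************************************************************/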
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}


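/***************************************************************
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		0	if the byte was acknowledged by the EEPROM
 *		1	if it was not
 *	Parms:
 *		io_base		io address of the device
 *		data		byte to send
 *		stop		whether to send a stop condition after
 *				the byte is acknowledged
 *
 *	This function clocks a byte out to the EEPROM, MSB first, then
 *	reads back the acknowledge bit.
 *
 **************************************************************/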
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Assume clock is low, tx is enabled */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	/* Release the data line and read the acknowledge bit
	 * driven by the EEPROM (0 = ACK).
	 */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* STOP, raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}


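/***************************************************************
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		io address of the device
 *		data		storage for the received byte
 *		stop		whether to end the transfer: if set,
 *				no acknowledge is sent and a stop
 *				condition follows; otherwise the byte
 *				is acknowledged
 *
 *	This function clocks a byte in from the EEPROM, MSB first.
 *
 **************************************************************/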
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* Assume clock is low, tx is enabled */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);	/* ack = 0 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);		/* no ack = 1 (stop) */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
		/* STOP, raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}


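/***************************************************************
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		0	on success
 *		1, 2 or 3	indicating which stage of the
 *				transaction was not acknowledged
 *	Parms:
 *		dev	the device structure
 *		ee_addr	EEPROM address of the byte to read
 *		data	storage for the received byte
 *
 *	This function reads one byte from the adapter's serial EEPROM,
 *	holding the private lock for the whole transaction.
 *
 **************************************************************/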
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	/* 0xa0 selects the EEPROM for a write (address phase) */
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	tlan_ee_send_start(dev->base_addr);
	/* 0xa1 selects the EEPROM for a read */
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}

