/*
 * tlan.c -- driver for TI ThunderLAN based Ethernet adapters
 * (Compaq Netelligent/NetFlex PCI and EISA boards, Olicom OC-218x/232x).
 * See the MODULE_* macros below for authorship and license.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"

static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

/* Define MONITOR to build the periodic link-beat monitor
 * (tlan_phy_monitor() below).
 */
#undef MONITOR

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

static struct board {
	const char *device_label;
	u32 flags;
	u16 addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(unsigned long);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);

static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
static void tlan_phy_monitor(struct net_device *);
#endif

static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);


/* The skb pointer for an in-flight buffer is stashed in the otherwise
 * unused list slots 8 (high 32 bits) and 9 (low 32 bits), so it can be
 * recovered when the frame completes.  The double 16-bit shift in
 * tlan_get_skb() avoids an undefined 32-bit shift on 32-bit hosts.
 */
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};

static inline void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer.data = (unsigned long) dev;
	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}

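/*****************************************************************************
 *
 * ThunderLAN driver primary functions
 *
 * These functions are more or less common to all Linux network drivers.
 *
 *****************************************************************************/


/***************************************************************
 *	tlan_remove_one
 *
 *	Tears down one PCI adapter: unregisters its net_device,
 *	frees the DMA-consistent list storage, releases the PCI
 *	regions, and clears the PCI driver data pointer.
 *
 **************************************************************/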
static void __devexit tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	if (priv->dma_storage) {
		pci_free_consistent(priv->pci_dev,
				    priv->dma_size, priv->dma_storage,
				    priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	free_netdev(dev);

	pci_set_drvdata(pdev, NULL);
}

static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

#ifdef CONFIG_PM

static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int tlan_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, 0, 0);
	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

#else

#define tlan_suspend NULL
#define tlan_resume NULL

#endif


static struct pci_driver tlan_driver = {
	.name = "tlan",
	.id_table = tlan_pci_tbl,
	.probe = tlan_init_one,
	.remove = __devexit_p(tlan_remove_one),
	.suspend = tlan_suspend,
	.resume = tlan_resume,
};

static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);

	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}


static int __devinit tlan_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}

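/***************************************************************
 *	tlan_probe1
 *
 *	Returns:
 *		0 on success, error code on error
 *	Parms:
 *		pdev	struct pci_dev of the adapter, or NULL
 *			for an EISA adapter
 *		ioaddr	I/O base (EISA only; PCI passes -1)
 *		irq	IRQ number (EISA only; PCI passes -1)
 *		rev	adapter revision (EISA only)
 *		ent	PCI device table entry (PCI only)
 *
 *	Allocates, initializes, and registers a net_device for
 *	one discovered ThunderLAN adapter, PCI or EISA.
 *
 **************************************************************/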
static int __devinit tlan_probe1(struct pci_dev *pdev,
				 long ioaddr, int irq, int rev,
				 const struct pci_device_id *ent)
{

	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);

	} else {
		/* EISA card */
		device_id = inw(ioaddr + EISA_ID2);
		priv->is_eisa = 1;
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];	/* NetFlex-3/E */
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	/* Per-device option bits in dev->mem_start override the module
	 * parameter arrays.
	 */
	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
			    priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
#endif
err_out:
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}


static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			pci_free_consistent(priv->pci_dev, priv->dma_size,
					    priv->dma_storage,
					    priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}


static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}


module_init(tlan_probe);
module_exit(tlan_exit);

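/***************************************************************
 *	tlan_eisa_probe
 *
 *	Probes the EISA slot I/O ranges 0x1000-0x8000 for boards
 *	carrying the ThunderLAN EISA ID, reads each enabled
 *	board's IRQ assignment, and hands it to tlan_probe1().
 *
 **************************************************************/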
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int rc = -ENODEV;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	/* Loop through all slot I/O ranges. */
	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* Check if the adapter is enabled. */
		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		/* Get the IRQ from the board. */
		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* Set up the newly found EISA adapter. */
		rc = tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;

	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open = tlan_open,
	.ndo_stop = tlan_close,
	.ndo_start_xmit = tlan_start_tx,
	.ndo_tx_timeout = tlan_tx_timeout,
	.ndo_get_stats = tlan_get_stats,
	.ndo_set_rx_mode = tlan_set_multicast_list,
	.ndo_do_ioctl = tlan_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tlan_poll,
#endif
};

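/***************************************************************
 *	tlan_init
 *
 *	Returns:
 *		0 on success, error code otherwise
 *	Parms:
 *		dev	device to initialize
 *
 *	Allocates the DMA-consistent memory holding the RX and
 *	TX list rings, reads the MAC address from the EEPROM,
 *	and installs the net_device_ops for the device.
 *
 **************************************************************/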
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
						 dma_size,
						 &priv->dma_storage_dma);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	memset(priv->dma_storage, 0, dma_size);
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < 6 ; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 (u8 *) &dev->dev_addr[i]);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	dev->addr_len = 6;

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}

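/***************************************************************
 *	tlan_open
 *
 *	Returns:
 *		0 on success, error code otherwise
 *	Parms:
 *		dev	device to open
 *
 *	Requests the (shared) IRQ, initializes the private timer,
 *	and starts the adapter so it is ready to send and receive
 *	packets.
 *
 **************************************************************/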
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	init_timer(&priv->timer);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}

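/***************************************************************
 *	tlan_ioctl
 *
 *	Returns:
 *		0 on success, error code otherwise
 *	Parms:
 *		dev	device performing the ioctl
 *		rq	ifreq holding the MII request data
 *		cmd	SIOCGMIIPHY, SIOCGMIIREG, or SIOCSMIIREG
 *
 *	Implements the standard MII ioctls against the PHY that
 *	is currently in use.
 *
 **************************************************************/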
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:	/* get address of MII PHY in use */
		data->phy_id = phy;
		/* fall through */

	case SIOCGMIIREG:	/* read MII PHY register */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:	/* write MII PHY register */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

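/***************************************************************
 *	tlan_tx_timeout
 *
 *	Parms:
 *		dev	device whose transmit timed out
 *
 *	Called when a transmit times out; frees and rebuilds the
 *	list rings, resets the adapter, and restarts the queue.
 *
 **************************************************************/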
static void tlan_tx_timeout(struct net_device *dev)
{
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}


static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev);
}

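/***************************************************************
 *	tlan_start_tx
 *
 *	Returns:
 *		NETDEV_TX_OK, or NETDEV_TX_BUSY if no TX list
 *		is free
 *	Parms:
 *		skb	buffer holding the frame to send
 *		dev	device sending the frame
 *
 *	Places the frame on the next free TX list, maps it for
 *	DMA, and either starts the TX channel or chains the list
 *	onto the one already in flight.
 *
 **************************************************************/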
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
						      skb->data, txlen,
						      PCI_DMA_TODEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}

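/***************************************************************
 *	tlan_handle_interrupt
 *
 *	Returns:
 *		IRQ_HANDLED if the adapter raised the interrupt,
 *		IRQ_NONE otherwise
 *	Parms:
 *		irq	interrupt number
 *		dev_id	the net_device cookie from request_irq()
 *
 *	Reads the interrupt type from HOST_INT, dispatches it
 *	via the tlan_int_vector table, and acknowledges it
 *	through HOST_CMD.
 *
 **************************************************************/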
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}

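/***************************************************************
 *	tlan_close
 *
 *	Returns:
 *		0 always
 *	Parms:
 *		dev	device to close
 *
 *	Stops the adapter, releases the IRQ, and frees the list
 *	buffers.
 *
 **************************************************************/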
static int tlan_close(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	priv->neg_be_verbose = 0;
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}


static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}

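/***************************************************************
 *	tlan_set_multicast_list
 *
 *	Parms:
 *		dev	device whose receive filter is changing
 *
 *	Programs the receive filter: CAF for promiscuous mode,
 *	all-ones hash tables for IFF_ALLMULTI, and otherwise the
 *	first three multicast addresses into AREGs 1-3 with the
 *	remainder hashed into the two 32-bit hash registers.
 *
 **************************************************************/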
static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}

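/***************************************************************
 *	tlan_handle_tx_eof
 *
 *	Returns:
 *		ack count and flags for the ACK command to the chip
 *	Parms:
 *		dev		device with a pending TX EOF interrupt
 *		host_int	HOST_INT contents at interrupt time
 *
 *	Completes transmitted frames: unmaps and frees their
 *	skbs, updates statistics, and on EOC restarts the TX
 *	channel if more lists are ready.
 *
 **************************************************************/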
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}


static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}

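/***************************************************************
 *	tlan_handle_rx_eof
 *
 *	Returns:
 *		ack count and flags for the ACK command to the chip
 *	Parms:
 *		dev		device with a pending RX EOF interrupt
 *		host_int	HOST_INT contents at interrupt time
 *
 *	Passes completed receive frames up the stack, attaches a
 *	freshly allocated skb to each drained list (reusing the
 *	old buffer if allocation fails), and requeues the list
 *	at the tail of the RX ring.
 *
 **************************************************************/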
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		pci_unmap_single(priv->pci_dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			pci_map_single(priv->pci_dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.data = (unsigned long) dev;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}


static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}


static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	host_int = 0;
	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}


static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						   tlphy_ctl);
			}

			if (debug)
				tlan_phy_print(dev);
		}
	}

	return ack;
}


static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}

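/***************************************************************
 *	tlan_timer
 *
 *	Parms:
 *		data	the net_device, cast to unsigned long
 *
 *	Central timer callback; dispatches on priv->timer_type
 *	to the PHY state-machine steps, the reset finisher, or
 *	the activity-LED handler.
 *
 **************************************************************/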
static void tlan_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct tlan_priv *priv = netdev_priv(dev);
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
#ifdef MONITOR
	case TLAN_TIMER_LINK_BEAT:
		tlan_phy_monitor(dev);
		break;
#endif
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.function = tlan_timer;
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}

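/***************************************************************
 *	tlan_reset_lists
 *
 *	Parms:
 *		dev	device whose lists are to be reset
 *
 *	Marks all TX lists unused and rebuilds the RX ring with
 *	newly allocated, DMA-mapped receive buffers chained via
 *	their forward pointers.
 *
 **************************************************************/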
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb) {
			netdev_err(dev, "Out of memory for received data\n");
			break;
		}

		list->buffer[0].address = pci_map_single(priv->pci_dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 PCI_DMA_FROMDEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	/* If we ran out of memory above, clear the skb slots of the
	 * remaining lists so tlan_free_lists() skips them.
	 */
	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}


static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(
				priv->pci_dev,
				list->buffer[0].address,
				max(skb->len,
				    (unsigned int)TLAN_MIN_FRAME_SIZE),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			pci_unmap_single(priv->pci_dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}


static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off. +0 +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
	}
}


static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info(" Forward = 0x%08x\n", list->forward);
	pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
	pr_info(" Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}

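/***************************************************************
 *	tlan_read_and_clear_stats
 *
 *	Parms:
 *		dev	device whose hardware counters to read
 *		record	TLAN_RECORD to fold the counters into
 *			dev->stats, TLAN_IGNORE to discard them
 *
 *	Reads the chip's statistics registers (reading clears
 *	them) and optionally accumulates the values into the
 *	net_device statistics.
 *
 **************************************************************/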
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}

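/***************************************************************
 *	tlan_reset_adapter
 *
 *	Parms:
 *		dev	device to reset
 *
 *	Puts the adapter into a known state: asserts the adapter
 *	reset, masks interrupts, clears the address and hash
 *	registers, programs NetConfig, releases the MII from
 *	reset, and kicks off PHY setup.
 *
 **************************************************************/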
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	/* 1. Assert the reset bit. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	/* 2. Turn off interrupts. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	/* 3. Clear AREGs and HASHs. */
	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	/* 4. Set up the NetConfig register. */
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	/* 6. Release the MII from reset by setting NMRST in NetSio. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	/* 7. Set up the remaining registers and the PHY. */
	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	if (priv->phy_num == 0)
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}


static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if ((status & MII_GS_LINK) &&
		    /* only report link details for the Nat. Semi. PHY */
		    (tlphy_id1 == NAT_SEM_ID1) &&
		    (tlphy_id2 == NAT_SEM_ID2)) {
			tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);

			netdev_info(dev,
				    "Link active with %s %uMbps %s-Duplex\n",
				    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
				    ? "forced" : "Autonegotiation enabled,",
				    tlphy_par & TLAN_PHY_SPEED_100
				    ? 100 : 10,
				    tlphy_par & TLAN_PHY_DUPLEX_FULL
				    ? "Full" : "Half");

			if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
				netdev_info(dev, "Partner capability:");
				for (i = 5; i < 10; i++)
					if (partner & (1 << i))
						pr_cont(" %s", media[i-5]);
				pr_cont("\n");
			}

			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
#ifdef MONITOR
			/* We have link beat; enable its monitoring. */
			priv->link = 1;
			tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
		} else if (status & MII_GS_LINK) {
			netdev_info(dev, "Link active\n");
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
					TLAN_LED_LINK);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}


static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}


static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info(" Off. +0 +1 +2 +3\n");
		for (i = 0; i < 0x20; i += 4) {
			tlan_mii_read_reg(dev, phy, i, &data0);
			tlan_mii_read_reg(dev, phy, i + 1, &data1);
			tlan_mii_read_reg(dev, phy, i + 2, &data2);
			tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}

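/***************************************************************
 *	tlan_phy_detect
 *
 *	Parms:
 *		dev	device to scan for PHYs
 *
 *	Records the internal PHY (at TLAN_PHY_MAX_ADDR) in
 *	priv->phy[0] and the first external PHY found on the MII
 *	bus in priv->phy[1], then selects which one to use.
 *
 **************************************************************/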
static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}


static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) &&
	    (priv->phy[1] != TLAN_PHY_NONE) &&
	    (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	/* Give the transceiver about 50 ms (HZ/20) to settle, then
	 * power it back up.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
}


static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);

	/* Wait another ~50 ms for the transceiver to settle before
	 * resetting it.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
}


static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 value;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
	while (value & MII_GC_RESET)
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);

	/* Once the reset bit clears, give the PHY ~50 ms more before
	 * trying to bring the link up.
	 */
	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
}

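/***************************************************************
 *	tlan_phy_start_link
 *
 *	Parms:
 *		dev	device whose link should be brought up
 *
 *	Forces speed/duplex if the options request it, otherwise
 *	advertises our abilities and starts autonegotiation;
 *	falls back to the internal PHY for AUI operation.
 *
 **************************************************************/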
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	/* MII_GEN_STS is read twice because some of its bits latch
	 * until read */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {

			/* Advertise the abilities from the status
			 * register (bits 11-15 map onto ANAR bits 5-9);
			 * the low bit selects the 802.3 protocol */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* Enable autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* Restart autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			/* Wait for 2 s to give autonegotiation time
			 * to make progress before checking on it.
			 */
			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* AUI is only available on the internal PHY; switch
		 * to it and restart the PHY setup sequence */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* Wait for 4 s to give the transceiver time to establish
	 * link before finishing the reset.
	 */
	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);

}



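/*********************************************************************
 *
 *	tlan_phy_finish_auto_neg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of the device.
 *
 *	This function checks whether autonegotiation has completed.
 *	If not, it reschedules itself.  Otherwise it intersects the
 *	abilities of both link partners, records the negotiated duplex
 *	mode, and falls back to the internal 10BaseT PHY when no
 *	100 Mbps mode was negotiated on an adapter that has one.
 *
 *********************************************************************/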
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 data;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* read, wait 1 ms, and read again to get a settled value */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* Wait for 8 s to give the process more time. */
		if (!priv->neg_be_verbose++) {
			pr_info("Giving autonegotiation more time.\n");
			pr_info("Please check that your adapter has\n");
			pr_info("been properly connected to a hub or switch.\n");
			pr_info("Trying to establish link in the background...\n");
		}
		tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	mode = an_adv & an_lpa & 0x03E0;	/* abilities both ends share */
	if (mode & 0x0100)			/* 100BaseTx full duplex */
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;	/* 10BaseT full duplex */

	/* No 100 Mbps mode was negotiated; if the adapter has an
	 * internal 10BaseT PHY, fall back to it. */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* Wait for 100 ms and then finish the reset. */
	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);

}

#ifdef MONITOR

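/*********************************************************************
 *
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure of the device to monitor.
 *
 *	This function polls the PHY status register via the MII bus to
 *	detect link loss or link re-establishment, updates the carrier
 *	state accordingly, and reschedules itself every 2 s.
 *
 *********************************************************************/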
void tlan_phy_monitor(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* Get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* Check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (priv->link) {
			priv->link = 0;
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			netif_carrier_off(dev);
			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
			return;
		}
	}

	/* Link re-established? */
	if ((phy_status & MII_GS_LINK) && !priv->link) {
		priv->link = 1;
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}

	/* Schedule the next check in 2 s */
	tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}

#endif /* MONITOR */



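/*********************************************************************
 *
 *	tlan_mii_read_reg
 *
 *	Returns:
 *		true if the PHY did not acknowledge (the read failed
 *		and *val is set to 0xffff), false on success.
 *	Parms:
 *		dev	The device structure containing the MII info.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be read.
 *		val	A buffer for the retrieved 16-bit value.
 *
 *	This function bit-bangs a read transaction on the TLAN's MII
 *	bus through the SIO register: preamble, start and read opcode,
 *	PHY and register addresses, then 16 data bits clocked in MSB
 *	first.  MII interrupts are disabled for the duration.
 *
 *********************************************************************/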
static bool
tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	/* change direction for the turnaround */
	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
	if (nack) {	/* no ACK, so clock out dummy bits and fake it */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {	/* ACK, so read the 16 data bits, MSB first */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	/* idle cycle */
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	return err;

}



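/*********************************************************************
 *
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *		data		The bits to be sent.
 *		num_bits	The number of bits to send (up to 32).
 *
 *	This function clocks data bits onto the MII bus, most
 *	significant bit first, using the TLAN SIO register.
 *
 *********************************************************************/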
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	/* clock the bits out MSB first; the dummy reads flush the
	 * writes to the bus */
	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}



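/*********************************************************************
 *
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *
 *	This function sends 32 clock cycles with the transmitter
 *	disabled, resynchronizing the management interface of any
 *	attached PHYs before a new MII transaction.
 *
 *********************************************************************/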
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* 32 clock cycles with MTXEN off */
	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}



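/*********************************************************************
 *
 *	tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure containing the MII info.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be written.
 *		val	The 16-bit value to write.
 *
 *	This function bit-bangs a write transaction on the TLAN's MII
 *	bus through the SIO register: preamble, start and write
 *	opcode, PHY and register addresses, turnaround, then 16 data
 *	bits.  MII interrupts are disabled for the duration.
 *
 *********************************************************************/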
static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	unsigned long flags = 0;
	struct tlan_priv *priv = netdev_priv(dev);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* turnaround (10b) */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

}



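/*********************************************************************
 *
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The base IO port of the adapter.
 *
 *	This function generates an I2C-style start condition on the
 *	serial EEPROM bus (data pulled low while the clock is high),
 *	using the TLAN SIO register.
 *
 *********************************************************************/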
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* START: pull data low while the clock is high */
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

}



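/*********************************************************************
 *
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		0 if the EEPROM acknowledged the byte, 1 otherwise.
 *	Parms:
 *		io_base		The base IO port of the adapter.
 *		data		The byte to send.
 *		stop		If TLAN_EEPROM_STOP, generate a stop
 *				condition after the ACK.
 *
 *	This function sends a byte on the serial EEPROM bus, MSB
 *	first, reads back the EEPROM's ACK bit, and optionally
 *	generates a stop condition.
 *
 *********************************************************************/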
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* Assume clock is low, tx is enabled; clock the byte out
	 * MSB first */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	/* release the data line and sample the EEPROM's ACK bit */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* STOP: raise data while the clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}



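/*********************************************************************
 *
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The base IO port of the adapter.
 *		data		A buffer for the received byte.
 *		stop		If TLAN_EEPROM_STOP, send a no-ack
 *				followed by a stop condition.
 *
 *	This function reads a byte from the serial EEPROM bus, MSB
 *	first, then either acknowledges it (to continue reading) or
 *	sends a no-ack followed by a stop condition.
 *
 *********************************************************************/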
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* Assume clock is low, tx is enabled; clock the byte in
	 * MSB first */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);	/* ack = 0 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);		/* no ack = 1 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);

		/* STOP: raise data while the clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}



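/*********************************************************************
 *
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		0 on success; 1, 2 or 3 identifying which phase of
 *		the transaction failed to get an ACK.
 *	Parms:
 *		dev	The device structure of the adapter.
 *		ee_addr	The EEPROM byte address to read from.
 *		data	A buffer for the byte read.
 *
 *	This function performs an I2C-style random read: it selects
 *	the EEPROM in write mode, sends the byte address, then issues
 *	a repeated start, re-selects the EEPROM in read mode, and
 *	reads one byte.
 *
 *********************************************************************/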
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	/* select the EEPROM in write mode (device address 0xa0) */
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	/* send the byte address to read from */
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	/* repeated start, then re-select in read mode (0xa1) */
	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}

