1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include <linux/module.h>
53#include <linux/moduleparam.h>
54#include <linux/kernel.h>
55#include <linux/sched.h>
56#include <linux/string.h>
57#include <linux/timer.h>
58#include <linux/errno.h>
59#include <linux/ioport.h>
60#include <linux/slab.h>
61#include <linux/interrupt.h>
62#include <linux/pci.h>
63#include <linux/netdevice.h>
64#include <linux/init.h>
65#include <linux/mii.h>
66#include <linux/etherdevice.h>
67#include <linux/skbuff.h>
68#include <linux/delay.h>
69#include <linux/ethtool.h>
70#include <linux/crc32.h>
71#include <linux/bitops.h>
72#include <linux/dma-mapping.h>
73
74#include <asm/processor.h>
75#include <asm/io.h>
76#include <asm/irq.h>
77#include <linux/uaccess.h>
78
79#include "sis900.h"
80
81#define SIS900_MODULE_NAME "sis900"
82#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
83
/* Driver banner; printed once at load time for non-modular builds. */
static const char version[] =
	KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";

/* Max events serviced in one interrupt before bailing out (module param). */
static int max_interrupt_work = 40;
/* Above this many multicast addresses, fall back to accept-all-multicast. */
static int multicast_filter_limit = 128;

/* NETIF_MSG_* debug bitmap; -1 means "use SIS900_DEF_MSG default". */
static int sis900_debug = -1;

/* Default message level used when sis900_debug is left at -1. */
#define SIS900_DEF_MSG \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (4*HZ)

/* Indices into card_names[], carried in sis900_pci_tbl driver_data. */
enum {
	SIS_900 = 0,
	SIS_7016
};
static const char * card_names[] = {
	"SiS 900 PCI Fast Ethernet",
	"SiS 7016 PCI Fast Ethernet"
};

/* PCI device IDs this driver binds to. */
static const struct pci_device_id sis900_pci_tbl[] = {
	{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
	{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016},
	{0,}
};
MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);
118
/* Forward declaration: decodes negotiated speed/duplex from the PHY. */
static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);

/*
 * Table of known PHY transceivers; matched on phy_id0 exactly and on
 * the upper bits of phy_id1 (mask 0xFFF0, see sis900_mii_probe).
 */
static const struct mii_chip_info {
	const char * name;
	u16 phy_id0;
	u16 phy_id1;
	u8  phy_types;		/* HOME, LAN, MIX or UNKNOWN */
#define HOME	0x0001
#define LAN	0x0002
#define MIX	0x0003
#define UNKNOWN	0x0
} mii_chip_table[] = {
	{ "SiS 900 Internal MII PHY", 		0x001d, 0x8000, LAN },
	{ "SiS 7014 Physical Layer Solution", 	0x0016, 0xf830, LAN },
	{ "SiS 900 on Foxconn 661 7MI", 	0x0143, 0xBC70, LAN },
	{ "Altimata AC101LF PHY", 		0x0022, 0x5520, LAN },
	{ "ADM 7001 LAN PHY", 			0x002e, 0xcc60, LAN },
	{ "AMD 79C901 10BASE-T PHY", 		0x0000, 0x6B70, LAN },
	{ "AMD 79C901 HomePNA PHY",		0x0000, 0x6B90, HOME},
	{ "ICS LAN PHY",			0x0015, 0xF440, LAN },
	{ "ICS LAN PHY",			0x0143, 0xBC70, LAN },
	{ "NS 83851 PHY",			0x2000, 0x5C20, MIX },
	{ "NS 83847 PHY",			0x2000, 0x5C30, MIX },
	{ "Realtek RTL8201 PHY",		0x0000, 0x8200, LAN },
	{ "VIA 6103 PHY",			0x0101, 0x8f20, LAN },
	{NULL,},
};

/* One node per PHY found on the MII bus; singly linked via ->next. */
struct mii_phy {
	struct mii_phy * next;
	int phy_addr;		/* MII management address, 0..31 */
	u16 phy_id0;
	u16 phy_id1;
	u16 status;		/* MII status register as last sampled */
	u8  phy_types;		/* HOME/LAN/MIX/UNKNOWN, from table above */
};

/*
 * Hardware Tx/Rx buffer descriptor, shared with the NIC via DMA.
 * NOTE(review): fields are plain u32, presumably little-endian as seen
 * by the chip — confirm against sis900.h/hardware docs.
 */
typedef struct _BufferDesc {
	u32 link;	/* bus address of the next descriptor */
	u32 cmdsts;	/* command/status bits and buffer size */
	u32 bufptr;	/* bus address of the data buffer */
} BufferDesc;
161
/* Per-adapter driver state, stored in the net_device private area. */
struct sis900_private {
	struct pci_dev * pci_dev;

	spinlock_t lock;		/* serializes hardware/ring access */

	struct mii_phy * mii;		/* currently selected PHY */
	struct mii_phy * first_mii;	/* head of the discovered-PHY list */
	unsigned int cur_phy;		/* MII address of the selected PHY */
	struct mii_if_info mii_info;

	void __iomem *ioaddr;		/* mapped device registers (BAR0) */

	struct timer_list timer;	/* link-monitor timer (sis900_timer) */
	u8 autong_complete;		/* 1 when auto-negotiation has finished */

	u32 msg_enable;			/* NETIF_MSG_* bitmap */

	unsigned int cur_rx, dirty_rx;	/* Rx ring producer/consumer indices */
	unsigned int cur_tx, dirty_tx;	/* Tx ring producer/consumer indices */

	/* skbs backing the in-flight ring buffers */
	struct sk_buff *tx_skbuff[NUM_TX_DESC];
	struct sk_buff *rx_skbuff[NUM_RX_DESC];
	BufferDesc *tx_ring;		/* DMA-coherent descriptor rings */
	BufferDesc *rx_ring;

	dma_addr_t tx_ring_dma;		/* bus addresses of the rings */
	dma_addr_t rx_ring_dma;

	unsigned int tx_full;		/* nonzero while the Tx ring is full */
	u8 host_bridge_rev;		/* SiS 630 host bridge revision, if any */
	u8 chipset_rev;			/* NIC PCI revision id */
};
195
MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

/* All parameters are read-only after load (mode 0444). */
module_param(multicast_filter_limit, int, 0444);
module_param(max_interrupt_work, int, 0444);
module_param(sis900_debug, int, 0444);
MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");

/* Shorthand MMIO accessors; each expects a local "ioaddr" in scope. */
#define sw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define sw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define sr32(reg)	ioread32(ioaddr + (reg))
#define sr16(reg)	ioread16(ioaddr + (reg))
211
/* Forward declarations for the driver entry points and helpers below. */
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis900_poll(struct net_device *dev);
#endif
static int sis900_open(struct net_device *net_dev);
static int sis900_mii_probe (struct net_device * net_dev);
static void sis900_init_rxfilter (struct net_device * net_dev);
static u16 read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
static void sis900_init_tx_ring(struct net_device *net_dev);
static void sis900_init_rx_ring(struct net_device *net_dev);
static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev);
static int sis900_rx(struct net_device *net_dev);
static void sis900_finish_xmit (struct net_device *net_dev);
static irqreturn_t sis900_interrupt(int irq, void *dev_instance);
static int sis900_close(struct net_device *net_dev);
static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
static u16 sis900_mcast_bitnr(u8 *addr, u8 revision);
static void set_rx_mode(struct net_device *net_dev);
static void sis900_reset(struct net_device *net_dev);
static void sis630_set_eq(struct net_device *net_dev, u8 revision);
static int sis900_set_config(struct net_device *dev, struct ifmap *map);
static u16 sis900_default_phy(struct net_device * net_dev);
static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
static const struct ethtool_ops sis900_ethtool_ops;
244
245
246
247
248
249
250
251
252
253
254static int sis900_get_mac_addr(struct pci_dev *pci_dev,
255 struct net_device *net_dev)
256{
257 struct sis900_private *sis_priv = netdev_priv(net_dev);
258 void __iomem *ioaddr = sis_priv->ioaddr;
259 u16 signature;
260 int i;
261
262
263 signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
264 if (signature == 0xffff || signature == 0x0000) {
265 printk (KERN_WARNING "%s: Error EERPOM read %x\n",
266 pci_name(pci_dev), signature);
267 return 0;
268 }
269
270
271 for (i = 0; i < 3; i++)
272 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
273
274 return 1;
275}
276
277
278
279
280
281
282
283
284
285
286
287static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
288 struct net_device *net_dev)
289{
290 struct pci_dev *isa_bridge = NULL;
291 u8 reg;
292 int i;
293
294 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
295 if (!isa_bridge)
296 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
297 if (!isa_bridge) {
298 printk(KERN_WARNING "%s: Can not find ISA bridge\n",
299 pci_name(pci_dev));
300 return 0;
301 }
302 pci_read_config_byte(isa_bridge, 0x48, ®);
303 pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);
304
305 for (i = 0; i < 6; i++) {
306 outb(0x09 + i, 0x70);
307 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
308 }
309
310 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
311 pci_dev_put(isa_bridge);
312
313 return 1;
314}
315
316
317
318
319
320
321
322
323
324
325
326
/**
 * sis635_get_mac_addr - read MAC address for SiS 635 model
 * @pci_dev: the sis900 PCI device (unused here, kept for a uniform API)
 * @net_dev: the net device whose dev_addr is filled in
 *
 * The 635 autoloads the MAC into its receive filter registers; trigger a
 * RELOAD, then read the three address words back through rfcr/rfdr with
 * the receive filter temporarily disabled.
 *
 * Returns 1 (this path cannot fail).
 */
static int sis635_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 rfcrSave;
	u32 i;

	rfcrSave = sr32(rfcr);

	/* strobe RELOAD to latch the MAC into the filter registers */
	sw32(cr, rfcrSave | RELOAD);
	sw32(cr, 0);

	/* disable packet filtering before setting the filter address */
	sw32(rfcr, rfcrSave & ~RFEN);

	/* select each filter-address slot and read the word back */
	for (i = 0 ; i < 3 ; i++) {
		sw32(rfcr, (i << RFADDR_shift));
		*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
	}

	/* re-enable packet filtering with the saved settings */
	sw32(rfcr, rfcrSave | RFEN);

	return 1;
}
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
/**
 * sis96x_get_mac_addr - read MAC address for SiS 962/963 model
 * @pci_dev: the sis900 PCI device (unused here, kept for a uniform API)
 * @net_dev: the net device whose dev_addr is filled in
 *
 * The 96x shares its EEPROM with the LAN/1394 controller, so the driver
 * must request access (EEREQ), poll for the grant (EEGNT, up to ~2 ms),
 * read the three MAC words, and release the EEPROM (EEDONE).
 *
 * Returns 1 on success, 0 if the grant never arrived.
 */
static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int wait, rc = 0;

	sw32(mear, EEREQ);
	for (wait = 0; wait < 2000; wait++) {
		if (sr32(mear) & EEGNT) {
			u16 *mac = (u16 *)net_dev->dev_addr;
			int i;

			/* MAC is three consecutive 16-bit EEPROM words */
			for (i = 0; i < 3; i++)
				mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);

			rc = 1;
			break;
		}
		udelay(1);
	}
	/* always release the EEPROM, granted or not */
	sw32(mear, EEDONE);
	return rc;
}
396
/* net_device callbacks wired into the networking core. */
static const struct net_device_ops sis900_netdev_ops = {
	.ndo_open		= sis900_open,
	.ndo_stop		= sis900_close,
	.ndo_start_xmit		= sis900_start_xmit,
	.ndo_set_config		= sis900_set_config,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis900_poll,
#endif
};
411
412
413
414
415
416
417
418
419
420
421
422
/**
 * sis900_probe - probe and initialize one SiS 900/7016 device
 * @pci_dev: the PCI device being probed
 * @pci_id: matching entry from sis900_pci_tbl
 *
 * Enables the device, maps its registers, allocates the DMA descriptor
 * rings, obtains the MAC address via the chipset-specific helper, probes
 * the MII bus and registers the net device.
 *
 * Returns 0 on success or a negative errno; all resources acquired before
 * a failure are released on the goto unwind path at the bottom.
 */
static int sis900_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *pci_id)
{
	struct sis900_private *sis_priv;
	struct net_device *net_dev;
	struct pci_dev *dev;
	dma_addr_t ring_dma;
	void *ring_space;
	void __iomem *ioaddr;
	int i, ret;
	const char *card_name = card_names[pci_id->driver_data];
	const char *dev_name = pci_name(pci_dev);

/* when built into the kernel, only announce ourselves once a device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* wake the device and set up PCI command register bits */
	ret = pci_enable_device(pci_dev);
	if(ret) return ret;

	/* the chip can only DMA within the low 32-bit address space */
	i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if(i){
		printk(KERN_ERR "sis900.c: architecture does not support "
			"32bit PCI busmaster DMA\n");
		return i;
	}

	pci_set_master(pci_dev);

	net_dev = alloc_etherdev(sizeof(struct sis900_private));
	if (!net_dev)
		return -ENOMEM;
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);

	/* claim the device's I/O and memory regions */
	ret = pci_request_regions(pci_dev, "sis900");
	if (ret)
		goto err_out;

	/* map BAR 0 (full length) into kernel virtual space */
	ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!ioaddr) {
		ret = -ENOMEM;
		goto err_out_cleardev;
	}

	sis_priv = netdev_priv(net_dev);
	sis_priv->ioaddr = ioaddr;
	sis_priv->pci_dev = pci_dev;
	spin_lock_init(&sis_priv->lock);

	pci_set_drvdata(pci_dev, net_dev);

	/* allocate DMA-coherent space for the Tx descriptor ring */
	ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_out_unmap;
	}
	sis_priv->tx_ring = ring_space;
	sis_priv->tx_ring_dma = ring_dma;

	/* ... and for the Rx descriptor ring */
	ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_unmap_tx;
	}
	sis_priv->rx_ring = ring_space;
	sis_priv->rx_ring_dma = ring_dma;

	/* hook up the net_device callbacks */
	net_dev->netdev_ops = &sis900_netdev_ops;
	net_dev->watchdog_timeo = TX_TIMEOUT;
	net_dev->ethtool_ops = &sis900_ethtool_ops;

	if (sis900_debug > 0)
		sis_priv->msg_enable = sis900_debug;
	else
		sis_priv->msg_enable = SIS900_DEF_MSG;

	sis_priv->mii_info.dev = net_dev;
	sis_priv->mii_info.mdio_read = mdio_read;
	sis_priv->mii_info.mdio_write = mdio_write;
	sis_priv->mii_info.phy_id_mask = 0x1f;
	sis_priv->mii_info.reg_num_mask = 0x1f;

	/* the MAC address lives in a different place per chipset revision */
	sis_priv->chipset_rev = pci_dev->revision;
	if(netif_msg_probe(sis_priv))
		printk(KERN_DEBUG "%s: detected revision %2.2x, "
				"trying to get MAC address...\n",
				dev_name, sis_priv->chipset_rev);

	ret = 0;
	if (sis_priv->chipset_rev == SIS630E_900_REV)
		ret = sis630e_get_mac_addr(pci_dev, net_dev);
	else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90) )
		ret = sis635_get_mac_addr(pci_dev, net_dev);
	else if (sis_priv->chipset_rev == SIS96x_900_REV)
		ret = sis96x_get_mac_addr(pci_dev, net_dev);
	else
		ret = sis900_get_mac_addr(pci_dev, net_dev);

	/* fall back to a random MAC rather than failing the probe */
	if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
		eth_hw_addr_random(net_dev);
		printk(KERN_WARNING "%s: Unreadable or invalid MAC address,"
				"using random generated one\n", dev_name);
	}

	/* 630ET: set the mii access mode as software-mode */
	if (sis_priv->chipset_rev == SIS630ET_900_REV)
		sw32(cr, ACCESSMODE | sr32(cr));

	/* probe for the MII transceiver(s); zero means none found */
	if (sis900_mii_probe(net_dev) == 0) {
		printk(KERN_WARNING "%s: Error probing MII device.\n",
		       dev_name);
		ret = -ENODEV;
		goto err_unmap_rx;
	}

	/* save the revision of the host bridge for sis630_set_eq() */
	dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
	if (dev) {
		sis_priv->host_bridge_rev = dev->revision;
		pci_dev_put(dev);
	}

	ret = register_netdev(net_dev);
	if (ret)
		goto err_unmap_rx;

	/* print some information about our NIC */
	printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
	       net_dev->name, card_name, ioaddr, pci_dev->irq,
	       net_dev->dev_addr);

	/* Detect Wake on Lan support: bits 27..29 of CFGPMC */
	ret = (sr32(CFGPMC) & PMESP) >> 27;
	if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
		printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);

	return 0;

err_unmap_rx:
	pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
		sis_priv->rx_ring_dma);
err_unmap_tx:
	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
		sis_priv->tx_ring_dma);
err_out_unmap:
	pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
	pci_release_regions(pci_dev);
 err_out:
	free_netdev(net_dev);
	return ret;
}
584
585
586
587
588
589
590
591
592
593
594static int sis900_mii_probe(struct net_device *net_dev)
595{
596 struct sis900_private *sis_priv = netdev_priv(net_dev);
597 const char *dev_name = pci_name(sis_priv->pci_dev);
598 u16 poll_bit = MII_STAT_LINK, status = 0;
599 unsigned long timeout = jiffies + 5 * HZ;
600 int phy_addr;
601
602 sis_priv->mii = NULL;
603
604
605 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
606 struct mii_phy * mii_phy = NULL;
607 u16 mii_status;
608 int i;
609
610 mii_phy = NULL;
611 for(i = 0; i < 2; i++)
612 mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);
613
614 if (mii_status == 0xffff || mii_status == 0x0000) {
615 if (netif_msg_probe(sis_priv))
616 printk(KERN_DEBUG "%s: MII at address %d"
617 " not accessible\n",
618 dev_name, phy_addr);
619 continue;
620 }
621
622 if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
623 mii_phy = sis_priv->first_mii;
624 while (mii_phy) {
625 struct mii_phy *phy;
626 phy = mii_phy;
627 mii_phy = mii_phy->next;
628 kfree(phy);
629 }
630 return 0;
631 }
632
633 mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
634 mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
635 mii_phy->phy_addr = phy_addr;
636 mii_phy->status = mii_status;
637 mii_phy->next = sis_priv->mii;
638 sis_priv->mii = mii_phy;
639 sis_priv->first_mii = mii_phy;
640
641 for (i = 0; mii_chip_table[i].phy_id1; i++)
642 if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
643 ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)){
644 mii_phy->phy_types = mii_chip_table[i].phy_types;
645 if (mii_chip_table[i].phy_types == MIX)
646 mii_phy->phy_types =
647 (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME;
648 printk(KERN_INFO "%s: %s transceiver found "
649 "at address %d.\n",
650 dev_name,
651 mii_chip_table[i].name,
652 phy_addr);
653 break;
654 }
655
656 if( !mii_chip_table[i].phy_id1 ) {
657 printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
658 dev_name, phy_addr);
659 mii_phy->phy_types = UNKNOWN;
660 }
661 }
662
663 if (sis_priv->mii == NULL) {
664 printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name);
665 return 0;
666 }
667
668
669 sis_priv->mii = NULL;
670 sis900_default_phy( net_dev );
671
672
673 if ((sis_priv->mii->phy_id0 == 0x001D) &&
674 ((sis_priv->mii->phy_id1&0xFFF0) == 0x8000))
675 status = sis900_reset_phy(net_dev, sis_priv->cur_phy);
676
677
678 if ((sis_priv->mii->phy_id0 == 0x0015) &&
679 ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
680 mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
681
682 if(status & MII_STAT_LINK){
683 while (poll_bit) {
684 yield();
685
686 poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
687 if (time_after_eq(jiffies, timeout)) {
688 printk(KERN_WARNING "%s: reset phy and link down now\n",
689 dev_name);
690 return -ETIME;
691 }
692 }
693 }
694
695 if (sis_priv->chipset_rev == SIS630E_900_REV) {
696
697 mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
698 mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
699 mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
700 mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
701
702 }
703
704 if (sis_priv->mii->status & MII_STAT_LINK)
705 netif_carrier_on(net_dev);
706 else
707 netif_carrier_off(net_dev);
708
709 return 1;
710}
711
712
713
714
715
716
717
718
719
720
/**
 * sis900_default_phy - select the default PHY for the NIC
 * @net_dev: the net device to probe
 *
 * Picks the first known PHY with link up; if none has link, prefers a
 * HomePNA PHY, then a LAN PHY, then the first PHY found. All other PHYs
 * are isolated and set to auto-negotiate. The chosen PHY is un-isolated.
 *
 * Returns the (twice-read, latched) MII status of the selected PHY.
 */
static u16 sis900_default_phy(struct net_device * net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct mii_phy *phy = NULL, *phy_home = NULL,
		*default_phy = NULL, *phy_lan = NULL;
	u16 status;

	for (phy=sis_priv->first_mii; phy; phy=phy->next) {
		/* read twice: status is latched, first read is stale */
		status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
		status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);

		/* first known PHY with link becomes the default */
		if ((status & MII_STAT_LINK) && !default_phy &&
		    (phy->phy_types != UNKNOWN))
			default_phy = phy;
		else {
			/* isolate the non-selected PHY, keep autoneg on */
			status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
			mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
				status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
			if (phy->phy_types == HOME)
				phy_home = phy;
			else if(phy->phy_types == LAN)
				phy_lan = phy;
		}
	}

	/* no link anywhere: fall back HOME > LAN > first found */
	if (!default_phy && phy_home)
		default_phy = phy_home;
	else if (!default_phy && phy_lan)
		default_phy = phy_lan;
	else if (!default_phy)
		default_phy = sis_priv->first_mii;

	if (sis_priv->mii != default_phy) {
		sis_priv->mii = default_phy;
		sis_priv->cur_phy = default_phy->phy_addr;
		printk(KERN_INFO "%s: Using transceiver found at address %d as default\n",
		       pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
	}

	sis_priv->mii_info.phy_id = sis_priv->cur_phy;

	/* take the selected PHY out of isolation */
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
	status &= (~MII_CNTL_ISOLATE);

	mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);

	return status;
}
772
773
774
775
776
777
778
779
780
781
782
/**
 * sis900_set_capability - advertise the PHY's media capabilities
 * @net_dev: the net device to probe
 * @phy: the PHY whose latched status selects what to advertise
 *
 * Builds the auto-negotiation advertisement word from the capability
 * bits in phy->status and writes it to the MII_ANADV register.
 */
static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
{
	u16 cap;
	u16 status;

	/* read twice to refresh the latched status register */
	status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
	status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);

	cap = MII_NWAY_CSMA_CD |
		((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
		((phy->status & MII_STAT_CAN_TX)    ? MII_NWAY_TX:0) |
		((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
		((phy->status & MII_STAT_CAN_T)     ? MII_NWAY_T:0);

	mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap);
}
799
800
801
/* A dummy register read provides the required EEPROM timing delay. */
#define eeprom_delay()	sr32(mear)

/**
 * read_eeprom - bit-bang one 16-bit word out of the serial EEPROM
 * @ioaddr: mapped device registers
 * @location: EEPROM word address to read
 *
 * Clocks a read command out on the mear register EEPROM pins (EECS,
 * EECLK, EEDI) then clocks 16 data bits back in via EEDO, MSB first.
 *
 * Returns the 16-bit word read from the EEPROM.
 */
static u16 read_eeprom(void __iomem *ioaddr, int location)
{
	u32 read_cmd = location | EEread;
	int i;
	u16 retval = 0;

	sw32(mear, 0);
	eeprom_delay();
	sw32(mear, EECS);
	eeprom_delay();

	/* shift the 9 command bits out, MSB first */
	for (i = 8; i >= 0; i--) {
		u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;

		sw32(mear, dataval);
		eeprom_delay();
		sw32(mear, dataval | EECLK);
		eeprom_delay();
	}
	sw32(mear, EECS);
	eeprom_delay();

	/* clock the 16 data bits in, MSB first */
	for (i = 16; i > 0; i--) {
		sw32(mear, EECS);
		eeprom_delay();
		sw32(mear, EECS | EECLK);
		eeprom_delay();
		retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	sw32(mear, 0);
	eeprom_delay();

	return retval;
}
852
853
854
855
/* A dummy register read provides the required MDIO timing delay. */
#define mdio_delay()	sr32(mear)

/* Drive the MDIO line high for one clock to put the bus in idle state. */
static void mdio_idle(struct sis900_private *sp)
{
	void __iomem *ioaddr = sp->ioaddr;

	sw32(mear, MDIO | MDDIR);
	mdio_delay();
	sw32(mear, MDIO | MDDIR | MDC);
}
866
867
/* Clock 32 one-bits (the MDIO preamble) to synchronize the PHY. */
static void mdio_reset(struct sis900_private *sp)
{
	void __iomem *ioaddr = sp->ioaddr;
	int i;

	for (i = 31; i >= 0; i--) {
		sw32(mear, MDDIR | MDIO);
		mdio_delay();
		sw32(mear, MDDIR | MDIO | MDC);
		mdio_delay();
	}
}
880
881
882
883
884
885
886
887
888
889
890
891
/**
 * mdio_read - read one MII management register
 * @net_dev: the net device to read from
 * @phy_id: MII address of the target PHY (0..31)
 * @location: register number to read
 *
 * Bit-bangs a clause-22 read frame on the mear register MDIO pins:
 * preamble, 16 command bits out, then 16 data bits in.
 *
 * Returns the 16-bit register value (in an int).
 */
static int mdio_read(struct net_device *net_dev, int phy_id, int location)
{
	int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
	struct sis900_private *sp = netdev_priv(net_dev);
	void __iomem *ioaddr = sp->ioaddr;
	u16 retval = 0;
	int i;

	mdio_reset(sp);
	mdio_idle(sp);

	/* shift the 16 command bits out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw32(mear, dataval);
		mdio_delay();
		sw32(mear, dataval | MDC);
		mdio_delay();
	}

	/* read the 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		sw32(mear, 0);
		mdio_delay();
		retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
		sw32(mear, MDC);
		mdio_delay();
	}
	sw32(mear, 0x00);

	return retval;
}
924
925
926
927
928
929
930
931
932
933
934
935
936
/**
 * mdio_write - write one MII management register
 * @net_dev: the net device to write to
 * @phy_id: MII address of the target PHY (0..31)
 * @location: register number to write
 * @value: 16-bit value to write
 *
 * Bit-bangs a clause-22 write frame on the mear register MDIO pins:
 * preamble, 16 command bits, 16 data bits, then two turnaround clocks.
 *
 * NOTE(review): the command and turnaround phases use 8-bit writes (sw8)
 * while the data phase uses 32-bit writes (sw32); presumably inherited
 * from vendor code — confirm against hardware docs before normalizing.
 */
static void mdio_write(struct net_device *net_dev, int phy_id, int location,
			int value)
{
	int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
	struct sis900_private *sp = netdev_priv(net_dev);
	void __iomem *ioaddr = sp->ioaddr;
	int i;

	mdio_reset(sp);
	mdio_idle(sp);

	/* shift the 16 command bits out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw8(mear, dataval);
		mdio_delay();
		sw8(mear, dataval | MDC);
		mdio_delay();
	}
	mdio_delay();

	/* shift the 16 data bits out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw32(mear, dataval);
		mdio_delay();
		sw32(mear, dataval | MDC);
		mdio_delay();
	}
	mdio_delay();

	/* clear out extra bits (turnaround) */
	for (i = 2; i > 0; i--) {
		sw8(mear, 0);
		mdio_delay();
		sw8(mear, MDC);
		mdio_delay();
	}
	sw32(mear, 0x00);
}
979
980
981
982
983
984
985
986
987
988
989
990
/**
 * sis900_reset_phy - reset one PHY transceiver
 * @net_dev: the net device owning the PHY
 * @phy_addr: MII address of the PHY to reset
 *
 * Samples the (latched) status register twice, then writes the reset
 * bit to the control register.
 *
 * Returns the status value read just before the reset was issued.
 */
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
{
	int i;
	u16 status;

	for (i = 0; i < 2; i++)
		status = mdio_read(net_dev, phy_addr, MII_STATUS);

	mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );

	return status;
}
1003
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling "interrupt" - used by netconsole and other tools that need
 * to run the interrupt handler without the device raising an IRQ.
 * Disables the IRQ line to avoid racing with the real handler.
 */
static void sis900_poll(struct net_device *dev)
{
	struct sis900_private *sp = netdev_priv(dev);
	const int irq = sp->pci_dev->irq;

	disable_irq(irq);
	sis900_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 * sis900_open - open the device (ndo_open)
 * @net_dev: the net device to open
 *
 * Resets the chip, applies the 630 equalizer workaround, requests the
 * (shared) IRQ, programs the receive filter and descriptor rings,
 * enables receive and interrupts, and starts the link-monitor timer.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int
sis900_open(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int ret;

	/* Soft reset the chip. */
	sis900_reset(net_dev);

	/* Equalizer workaround Rule */
	sis630_set_eq(net_dev, sis_priv->chipset_rev);

	ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (ret)
		return ret;

	sis900_init_rxfilter(net_dev);

	sis900_init_tx_ring(net_dev);
	sis900_init_rx_ring(net_dev);

	set_rx_mode(net_dev);

	netif_start_queue(net_dev);

	/* Workaround for EDB: kick the transceiver at 10 Mbps half duplex */
	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);

	/* Enable all known interrupts by setting the interrupt mask. */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
	sw32(cr, RxENA | sr32(cr));
	sw32(ier, IE);

	sis900_check_mode(net_dev, sis_priv->mii);

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&sis_priv->timer, sis900_timer, 0);
	sis_priv->timer.expires = jiffies + HZ;
	add_timer(&sis_priv->timer);

	return 0;
}
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083static void
1084sis900_init_rxfilter (struct net_device * net_dev)
1085{
1086 struct sis900_private *sis_priv = netdev_priv(net_dev);
1087 void __iomem *ioaddr = sis_priv->ioaddr;
1088 u32 rfcrSave;
1089 u32 i;
1090
1091 rfcrSave = sr32(rfcr);
1092
1093
1094 sw32(rfcr, rfcrSave & ~RFEN);
1095
1096
1097 for (i = 0 ; i < 3 ; i++) {
1098 u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
1099
1100 sw32(rfcr, i << RFADDR_shift);
1101 sw32(rfdr, w);
1102
1103 if (netif_msg_hw(sis_priv)) {
1104 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
1105 net_dev->name, i, sr32(rfdr));
1106 }
1107 }
1108
1109
1110 sw32(rfcr, rfcrSave | RFEN);
1111}
1112
1113
1114
1115
1116
1117
1118
1119
/**
 * sis900_init_tx_ring - initialize the Tx descriptor ring
 * @net_dev: the net device to initialize for
 *
 * Resets the ring indices, links each descriptor to the next (the last
 * wraps to the first), clears status/buffer fields, and loads the ring
 * base bus address into the txdp register.
 */
static void
sis900_init_tx_ring(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int i;

	sis_priv->tx_full = 0;
	sis_priv->dirty_tx = sis_priv->cur_tx = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		sis_priv->tx_skbuff[i] = NULL;

		/* link points at the bus address of the next descriptor */
		sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
			((i+1)%NUM_TX_DESC)*sizeof(BufferDesc);
		sis_priv->tx_ring[i].cmdsts = 0;
		sis_priv->tx_ring[i].bufptr = 0;
	}

	/* load Tx Descriptor Register */
	sw32(txdp, sis_priv->tx_ring_dma);
	if (netif_msg_hw(sis_priv))
		printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
		       net_dev->name, sr32(txdp));
}
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154static void
1155sis900_init_rx_ring(struct net_device *net_dev)
1156{
1157 struct sis900_private *sis_priv = netdev_priv(net_dev);
1158 void __iomem *ioaddr = sis_priv->ioaddr;
1159 int i;
1160
1161 sis_priv->cur_rx = 0;
1162 sis_priv->dirty_rx = 0;
1163
1164
1165 for (i = 0; i < NUM_RX_DESC; i++) {
1166 sis_priv->rx_skbuff[i] = NULL;
1167
1168 sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma +
1169 ((i+1)%NUM_RX_DESC)*sizeof(BufferDesc);
1170 sis_priv->rx_ring[i].cmdsts = 0;
1171 sis_priv->rx_ring[i].bufptr = 0;
1172 }
1173
1174
1175 for (i = 0; i < NUM_RX_DESC; i++) {
1176 struct sk_buff *skb;
1177
1178 if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
1179
1180
1181
1182
1183 break;
1184 }
1185 sis_priv->rx_skbuff[i] = skb;
1186 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
1187 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
1188 skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1189 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1190 sis_priv->rx_ring[i].bufptr))) {
1191 dev_kfree_skb(skb);
1192 sis_priv->rx_skbuff[i] = NULL;
1193 break;
1194 }
1195 }
1196 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1197
1198
1199 sw32(rxdp, sis_priv->rx_ring_dma);
1200 if (netif_msg_hw(sis_priv))
1201 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
1202 net_dev->name, sr32(rxdp));
1203}
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
/**
 * sis630_set_eq - apply the SiS 630 equalizer workaround
 * @net_dev: the net device to work on
 * @revision: chipset revision; only 630E/EA1/A/ET revisions are handled
 *
 * Samples the PHY equalizer value (MII_RESV bits 3..7) several times
 * while the link is up, derives a corrected value from the min/max of
 * the samples per the affected revision, and forces it into the PHY.
 * With the link down, it re-enables equalizer auto-mode instead.
 */
static void sis630_set_eq(struct net_device *net_dev, u8 revision)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	u16 reg14h, eq_value=0, max_value=0, min_value=0;
	int i, maxcount=10;

	/* only the listed 630 revisions need this workaround */
	if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
	       revision == SIS630A_900_REV || revision ==  SIS630ET_900_REV) )
		return;

	if (netif_carrier_ok(net_dev)) {
		/* freeze the equalizer so it can be sampled */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
					(0x2200 | reg14h) & 0xBFFF);
		/* track min/max equalizer value over maxcount samples */
		for (i=0; i < maxcount; i++) {
			eq_value = (0x00F8 & mdio_read(net_dev,
					sis_priv->cur_phy, MII_RESV)) >> 3;
			if (i == 0)
				max_value=min_value=eq_value;
			max_value = (eq_value > max_value) ?
						eq_value : max_value;
			min_value = (eq_value < min_value) ?
						eq_value : min_value;
		}
		/* 630E/EA1/ET: correction depends on the sampled range */
		if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
		    revision == SIS630ET_900_REV) {
			if (max_value < 5)
				eq_value = max_value;
			else if (max_value >= 5 && max_value < 15)
				eq_value = (max_value == min_value) ?
						max_value+2 : max_value+1;
			else if (max_value >= 15)
				eq_value=(max_value == min_value) ?
						max_value+6 : max_value+5;
		}
		/* 630A with B0/B1 host bridge uses the average instead */
		if (revision == SIS630A_900_REV &&
		    (sis_priv->host_bridge_rev == SIS630B0 ||
		     sis_priv->host_bridge_rev == SIS630B1)) {
			if (max_value == 0)
				eq_value = 3;
			else
				eq_value = (max_value + min_value + 1)/2;
		}
		/* write the derived value back, forcing manual equalizer mode */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
		reg14h = (reg14h | 0x6000) & 0xFDFF;
		mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
	} else {
		/* link down: restore equalizer auto-mode */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		if (revision == SIS630A_900_REV &&
		    (sis_priv->host_bridge_rev == SIS630B0 ||
		     sis_priv->host_bridge_rev == SIS630B1))
			mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
						(reg14h | 0x2200) & 0xBFFF);
		else
			mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
						(reg14h | 0x2000) & 0xBFFF);
	}
}
1294
1295
1296
1297
1298
1299
1300
1301
1302
/**
 * sis900_timer - periodic link-beat monitor (runs every 5 seconds)
 * @t: timer embedded in struct sis900_private
 *
 * Polls the PHY status. If the carrier was off and link has appeared,
 * re-selects the default PHY, reads the negotiated mode and brings the
 * carrier up. If the carrier was on and link dropped, takes the carrier
 * down, applies PHY workarounds, and immediately re-checks for link.
 */
static void sis900_timer(struct timer_list *t)
{
	struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
	struct net_device *net_dev = sis_priv->mii_info.dev;
	struct mii_phy *mii_phy = sis_priv->mii;
	static const int next_tick = 5*HZ;
	int speed = 0, duplex = 0;
	u16 status;

	/* read twice: the status register latches link-down events */
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);

	/* Link OFF -> ON */
	if (!netif_carrier_ok(net_dev)) {
	LookForLink:
		/* Search for new PHY */
		status = sis900_default_phy(net_dev);
		mii_phy = sis_priv->mii;

		if (status & MII_STAT_LINK) {
			WARN_ON(!(status & MII_STAT_AUTO_DONE));

			sis900_read_mode(net_dev, &speed, &duplex);
			if (duplex) {
				sis900_set_mode(sis_priv, speed, duplex);
				sis630_set_eq(net_dev, sis_priv->chipset_rev);
				netif_carrier_on(net_dev);
			}
		}
	} else {
	/* Link ON -> OFF */
		if (!(status & MII_STAT_LINK)){
			netif_carrier_off(net_dev);
			if(netif_msg_link(sis_priv))
				printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);

			/* Change mode issue: reset the SiS 900 internal PHY */
			if ((mii_phy->phy_id0 == 0x001D) &&
			    ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
				sis900_reset_phy(net_dev,  sis_priv->cur_phy);

			sis630_set_eq(net_dev, sis_priv->chipset_rev);

			goto LookForLink;
		}
	}

	sis_priv->timer.expires = jiffies + next_tick;
	add_timer(&sis_priv->timer);
}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * sis900_check_mode - configure media mode for the attached PHY type
 * @net_dev: the net device to configure
 * @mii_phy: the PHY in use
 *
 * LAN PHYs get auto-negotiation (capability advertisement + restart);
 * HomePNA PHYs get the excessive-deferral bit (EXD) set and a fixed
 * HOME-speed half-duplex mode with auto-negotiation marked complete.
 */
static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int speed, duplex;

	if (mii_phy->phy_types == LAN) {
		sw32(cfg, ~EXD & sr32(cfg));
		sis900_set_capability(net_dev , mii_phy);
		sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
	} else {
		sw32(cfg, EXD | sr32(cfg));
		speed = HW_SPEED_HOME;
		duplex = FDX_CAPABLE_HALF_SELECTED;
		sis900_set_mode(sis_priv, speed, duplex);
		sis_priv->autong_complete = 1;
	}
}
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
1399{
1400 void __iomem *ioaddr = sp->ioaddr;
1401 u32 tx_flags = 0, rx_flags = 0;
1402
1403 if (sr32( cfg) & EDB_MASTER_EN) {
1404 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
1405 (TX_FILL_THRESH << TxFILLT_shift);
1406 rx_flags = DMA_BURST_64 << RxMXDMA_shift;
1407 } else {
1408 tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
1409 (TX_FILL_THRESH << TxFILLT_shift);
1410 rx_flags = DMA_BURST_512 << RxMXDMA_shift;
1411 }
1412
1413 if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) {
1414 rx_flags |= (RxDRNT_10 << RxDRNT_shift);
1415 tx_flags |= (TxDRNT_10 << TxDRNT_shift);
1416 } else {
1417 rx_flags |= (RxDRNT_100 << RxDRNT_shift);
1418 tx_flags |= (TxDRNT_100 << TxDRNT_shift);
1419 }
1420
1421 if (duplex == FDX_CAPABLE_FULL_SELECTED) {
1422 tx_flags |= (TxCSI | TxHBI);
1423 rx_flags |= RxATX;
1424 }
1425
1426#if IS_ENABLED(CONFIG_VLAN_8021Q)
1427
1428 rx_flags |= RxAJAB;
1429#endif
1430
1431 sw32(txcfg, tx_flags);
1432 sw32(rxcfg, rx_flags);
1433}
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
1447{
1448 struct sis900_private *sis_priv = netdev_priv(net_dev);
1449 int i = 0;
1450 u32 status;
1451
1452 for (i = 0; i < 2; i++)
1453 status = mdio_read(net_dev, phy_addr, MII_STATUS);
1454
1455 if (!(status & MII_STAT_LINK)){
1456 if(netif_msg_link(sis_priv))
1457 printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
1458 sis_priv->autong_complete = 1;
1459 netif_carrier_off(net_dev);
1460 return;
1461 }
1462
1463
1464 mdio_write(net_dev, phy_addr, MII_CONTROL,
1465 MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
1466 sis_priv->autong_complete = 0;
1467}
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
/* sis900_read_mode - determine negotiated link speed and duplex.
 * @net_dev: the net device to read the mode for
 * @speed: out: HW_SPEED_10_MBPS or HW_SPEED_100_MBPS
 * @duplex: out: FDX_CAPABLE_HALF/FULL_SELECTED
 *
 * Intersects our advertised abilities (ANADV) with the link partner's
 * (ANLPAR) to pick speed/duplex.  Leaves @speed/@duplex untouched when
 * there is no link.  Sets autong_complete once resolved.
 */
static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct mii_phy *phy = sis_priv->mii;
	int phy_addr = sis_priv->cur_phy;
	u32 status;
	u16 autoadv, autorec;
	int i;

	/* latched status register: second read gives the live state */
	for (i = 0; i < 2; i++)
		status = mdio_read(net_dev, phy_addr, MII_STATUS);

	if (!(status & MII_STAT_LINK))
		return;

	/* common abilities = what we advertise AND what the partner does */
	autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
	autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
	status = autoadv & autorec;

	*speed = HW_SPEED_10_MBPS;
	*duplex = FDX_CAPABLE_HALF_SELECTED;

	if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
		*speed = HW_SPEED_100_MBPS;
	if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
		*duplex = FDX_CAPABLE_FULL_SELECTED;

	sis_priv->autong_complete = 1;

	/* Vendor-specific fixup for PHYs with id0 0x0000 / id1 0x820x:
	 * read duplex from MII_CONTROL and speed from register 0x19
	 * instead of trusting the ANLPAR intersection.
	 * NOTE(review): PHY model presumed from the id — confirm. */
	if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
		if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
			*duplex = FDX_CAPABLE_FULL_SELECTED;
		if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
			*speed = HW_SPEED_100_MBPS;
	}

	if(netif_msg_link(sis_priv))
		printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
		       net_dev->name,
		       *speed == HW_SPEED_100_MBPS ?
		       "100mbps" : "10mbps",
		       *duplex == FDX_CAPABLE_FULL_SELECTED ?
		       "full" : "half");
}
1527
1528
1529
1530
1531
1532
1533
1534
1535
/* sis900_tx_timeout - handle a transmit watchdog timeout.
 * @net_dev: the net device that timed out
 * @txqueue: queue index (unused; single-queue device)
 *
 * Masks interrupts, discards every pending Tx descriptor (each counts
 * as a dropped packet), resets the ring indices and the chip's Tx
 * descriptor pointer, then re-enables interrupts so transmission can
 * restart from a clean ring.
 */
static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	unsigned long flags;
	int i;

	if (netif_msg_tx_err(sis_priv)) {
		printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
			net_dev->name, sr32(cr), sr32(isr));
	}

	/* Disable interrupts by clearing the interrupt mask. */
	sw32(imr, 0x0000);

	/* use spinlock to prevent the interrupt handler from touching
	 * the buffer ring while we discard it */
	spin_lock_irqsave(&sis_priv->lock, flags);

	/* discard unsent packets */
	sis_priv->dirty_tx = sis_priv->cur_tx = 0;
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = sis_priv->tx_skbuff[i];

		if (skb) {
			pci_unmap_single(sis_priv->pci_dev,
				sis_priv->tx_ring[i].bufptr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			sis_priv->tx_skbuff[i] = NULL;
			sis_priv->tx_ring[i].cmdsts = 0;
			sis_priv->tx_ring[i].bufptr = 0;
			net_dev->stats.tx_dropped++;
		}
	}
	sis_priv->tx_full = 0;
	netif_wake_queue(net_dev);

	spin_unlock_irqrestore(&sis_priv->lock, flags);

	netif_trans_update(net_dev); /* prevent an immediate re-timeout */

	/* load the Transmit Descriptor Register with the ring base */
	sw32(txdp, sis_priv->tx_ring_dma);

	/* Enable all known interrupts by setting the interrupt mask. */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
}
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594static netdev_tx_t
1595sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1596{
1597 struct sis900_private *sis_priv = netdev_priv(net_dev);
1598 void __iomem *ioaddr = sis_priv->ioaddr;
1599 unsigned int entry;
1600 unsigned long flags;
1601 unsigned int index_cur_tx, index_dirty_tx;
1602 unsigned int count_dirty_tx;
1603
1604 spin_lock_irqsave(&sis_priv->lock, flags);
1605
1606
1607 entry = sis_priv->cur_tx % NUM_TX_DESC;
1608 sis_priv->tx_skbuff[entry] = skb;
1609
1610
1611 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
1612 skb->data, skb->len, PCI_DMA_TODEVICE);
1613 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1614 sis_priv->tx_ring[entry].bufptr))) {
1615 dev_kfree_skb_any(skb);
1616 sis_priv->tx_skbuff[entry] = NULL;
1617 net_dev->stats.tx_dropped++;
1618 spin_unlock_irqrestore(&sis_priv->lock, flags);
1619 return NETDEV_TX_OK;
1620 }
1621 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
1622 sw32(cr, TxENA | sr32(cr));
1623
1624 sis_priv->cur_tx ++;
1625 index_cur_tx = sis_priv->cur_tx;
1626 index_dirty_tx = sis_priv->dirty_tx;
1627
1628 for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++)
1629 count_dirty_tx ++;
1630
1631 if (index_cur_tx == index_dirty_tx) {
1632
1633 sis_priv->tx_full = 1;
1634 netif_stop_queue(net_dev);
1635 } else if (count_dirty_tx < NUM_TX_DESC) {
1636
1637 netif_start_queue(net_dev);
1638 } else {
1639
1640 sis_priv->tx_full = 1;
1641 netif_stop_queue(net_dev);
1642 }
1643
1644 spin_unlock_irqrestore(&sis_priv->lock, flags);
1645
1646 if (netif_msg_tx_queued(sis_priv))
1647 printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
1648 "to slot %d.\n",
1649 net_dev->name, skb->data, (int)skb->len, entry);
1650
1651 return NETDEV_TX_OK;
1652}
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
/* sis900_interrupt - interrupt handler for the SiS 900.
 * @irq: interrupt number (unused)
 * @dev_instance: the net device this interrupt belongs to
 *
 * Services Rx and Tx events until the interrupt status register shows
 * nothing of interest, a fatal HIBERR is seen, or max_interrupt_work
 * iterations have been spent.  Returns IRQ_HANDLED when at least one
 * event was ours, IRQ_NONE otherwise (shared-IRQ friendly).
 */
static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
{
	struct net_device *net_dev = dev_instance;
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 status;
	unsigned int handled = 0;

	spin_lock (&sis_priv->lock);

	do {
		status = sr32(isr);

		if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
			/* nothing interesting happened */
			break;
		handled = 1;

		/* Rx events */
		if (status & (RxORN | RxERR | RxOK))

			sis900_rx(net_dev);

		/* Tx events */
		if (status & (TxURN | TxERR | TxIDLE))

			sis900_finish_xmit(net_dev);

		/* fatal/abnormal condition: bail out */
		if (status & HIBERR) {
			if(netif_msg_intr(sis_priv))
				printk(KERN_INFO "%s: Abnormal interrupt, "
				       "status %#8.8x.\n", net_dev->name, status);
			break;
		}
		/* budget exhausted: let the next interrupt continue */
		if (--boguscnt < 0) {
			if(netif_msg_intr(sis_priv))
				printk(KERN_INFO "%s: Too much work at interrupt, "
				       "interrupt status = %#8.8x.\n",
				       net_dev->name, status);
			break;
		}
	} while (1);

	if(netif_msg_intr(sis_priv))
		printk(KERN_DEBUG "%s: exiting interrupt, "
		       "interrupt status = %#8.8x\n",
		       net_dev->name, sr32(isr));

	spin_unlock (&sis_priv->lock);
	return IRQ_RETVAL(handled);
}
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
/* sis900_rx - receive all pending packets from the Rx ring.
 * @net_dev: the net device that receives data
 *
 * Walks the ring while descriptors carry OWN (which, given that refill
 * writes cmdsts = RX_BUF_SIZE without OWN, is set by the NIC once a
 * buffer has been filled).  Good frames are handed up via netif_rx();
 * error frames only bump statistics.  Each consumed slot is refilled
 * with a fresh buffer, or the old one is recycled when allocation
 * fails.  A second pass re-arms any slot left without a buffer, then
 * the receiver is re-enabled.  Always returns 0.
 */
static int sis900_rx(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
	u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
	int rx_work_limit;

	if (netif_msg_rx_status(sis_priv))
		printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
		       "status:0x%8.8x\n",
		       sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
	/* never process more descriptors than the ring currently holds */
	rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;

	while (rx_status & OWN) {
		unsigned int rx_size;
		unsigned int data_size;

		if (--rx_work_limit < 0)
			break;

		data_size = rx_status & DSIZE;
		rx_size = data_size - CRC_SIZE;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
		/* a VLAN tag may trip TOOLONG; accept the frame if it
		 * still fits in the buffer */
		if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
			rx_status &= (~ ((unsigned int)TOOLONG));
#endif

		if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
			/* corrupted packet received */
			if (netif_msg_rx_err(sis_priv))
				printk(KERN_DEBUG "%s: Corrupted packet "
				       "received, buffer status = 0x%8.8x/%d.\n",
				       net_dev->name, rx_status, data_size);
			net_dev->stats.rx_errors++;
			if (rx_status & OVERRUN)
				net_dev->stats.rx_over_errors++;
			if (rx_status & (TOOLONG|RUNT))
				net_dev->stats.rx_length_errors++;
			if (rx_status & (RXISERR | FAERR))
				net_dev->stats.rx_frame_errors++;
			if (rx_status & CRCERR)
				net_dev->stats.rx_crc_errors++;
			/* re-arm the descriptor for the NIC */
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
		} else {
			struct sk_buff * skb;
			struct sk_buff * rx_skb;

			pci_unmap_single(sis_priv->pci_dev,
				sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
				PCI_DMA_FROMDEVICE);

			/* allocate a replacement buffer first; if that
			 * fails, drop this packet and recycle its buffer
			 * back into the ring */
			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
				skb = sis_priv->rx_skbuff[entry];
				net_dev->stats.rx_dropped++;
				goto refill_rx_ring;
			}

			/* defensive check: a NULL slot here should not
			 * happen; free the fresh skb and stop processing */
			if (sis_priv->rx_skbuff[entry] == NULL) {
				if (netif_msg_rx_err(sis_priv))
					printk(KERN_WARNING "%s: NULL pointer "
					       "encountered in Rx ring\n"
					       "cur_rx:%4.4d, dirty_rx:%4.4d\n",
					       net_dev->name, sis_priv->cur_rx,
					       sis_priv->dirty_rx);
				dev_kfree_skb(skb);
				break;
			}

			/* hand the received buffer to the upper layers */
			rx_skb = sis_priv->rx_skbuff[entry];
			skb_put(rx_skb, rx_size);
			rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
			netif_rx(rx_skb);

			/* statistics */
			if ((rx_status & BCAST) == MCAST)
				net_dev->stats.multicast++;
			net_dev->stats.rx_bytes += rx_size;
			net_dev->stats.rx_packets++;
			sis_priv->dirty_rx++;
refill_rx_ring:
			sis_priv->rx_skbuff[entry] = skb;
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
			sis_priv->rx_ring[entry].bufptr =
				pci_map_single(sis_priv->pci_dev, skb->data,
					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
					sis_priv->rx_ring[entry].bufptr))) {
				dev_kfree_skb_irq(skb);
				sis_priv->rx_skbuff[entry] = NULL;
				break;
			}
		}
		sis_priv->cur_rx++;
		entry = sis_priv->cur_rx % NUM_RX_DESC;
		rx_status = sis_priv->rx_ring[entry].cmdsts;
	}

	/* second pass: refill any slot left without a buffer by an
	 * earlier allocation or DMA-mapping failure */
	for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
		struct sk_buff *skb;

		entry = sis_priv->dirty_rx % NUM_RX_DESC;

		if (sis_priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
			if (skb == NULL) {
				/* still no memory: this leaves a hole in
				 * the ring and the packet is dropped */
				net_dev->stats.rx_dropped++;
				break;
			}
			sis_priv->rx_skbuff[entry] = skb;
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
			sis_priv->rx_ring[entry].bufptr =
				pci_map_single(sis_priv->pci_dev, skb->data,
					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
					sis_priv->rx_ring[entry].bufptr))) {
				dev_kfree_skb_irq(skb);
				sis_priv->rx_skbuff[entry] = NULL;
				break;
			}
		}
	}

	/* re-enable the potentially idle receive state machine */
	sw32(cr , RxENA | sr32(cr));

	return 0;
}
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
/* sis900_finish_xmit - reclaim completed Tx descriptors.
 * @net_dev: the net device whose Tx ring is vacuumed
 *
 * Called from the interrupt handler (sis_priv->lock held).  Walks from
 * dirty_tx towards cur_tx, stopping at the first descriptor the NIC
 * still owns; records statistics, unmaps and frees each completed skb,
 * and wakes the queue once at least 4 ring slots are free again.
 */
static void sis900_finish_xmit (struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);

	for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
		struct sk_buff *skb;
		unsigned int entry;
		u32 tx_status;

		entry = sis_priv->dirty_tx % NUM_TX_DESC;
		tx_status = sis_priv->tx_ring[entry].cmdsts;

		if (tx_status & OWN) {
			/* this packet is still owned by the NIC — not
			 * transmitted yet; stop reclaiming here */
			break;
		}

		if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
			/* packet unsuccessfully transmitted */
			if (netif_msg_tx_err(sis_priv))
				printk(KERN_DEBUG "%s: Transmit "
				       "error, Tx status %8.8x.\n",
				       net_dev->name, tx_status);
			net_dev->stats.tx_errors++;
			if (tx_status & UNDERRUN)
				net_dev->stats.tx_fifo_errors++;
			if (tx_status & ABORT)
				net_dev->stats.tx_aborted_errors++;
			if (tx_status & NOCARRIER)
				net_dev->stats.tx_carrier_errors++;
			if (tx_status & OWCOLL)
				net_dev->stats.tx_window_errors++;
		} else {
			/* packet successfully transmitted: COLCNT holds
			 * the collision count, DSIZE the byte count */
			net_dev->stats.collisions += (tx_status & COLCNT) >> 16;
			net_dev->stats.tx_bytes += tx_status & DSIZE;
			net_dev->stats.tx_packets++;
		}

		/* unmap and free the original skb, clear the descriptor */
		skb = sis_priv->tx_skbuff[entry];
		pci_unmap_single(sis_priv->pci_dev,
			sis_priv->tx_ring[entry].bufptr, skb->len,
			PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		sis_priv->tx_skbuff[entry] = NULL;
		sis_priv->tx_ring[entry].bufptr = 0;
		sis_priv->tx_ring[entry].cmdsts = 0;
	}

	if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
	    sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
		/* the ring has room again: clear tx_full and restart
		 * the transmit queue */
		sis_priv->tx_full = 0;
		netif_wake_queue (net_dev);
	}
}
1944
1945
1946
1947
1948
1949
1950
1951
1952
/* sis900_close - shut down the net device.
 * @net_dev: the net device to be closed
 *
 * Stops the transmit queue, masks chip interrupts, halts the Rx/Tx
 * state machines, deletes the link timer, releases the IRQ and
 * unmaps/frees every buffer attached to the Rx and Tx rings.  (The
 * descriptor rings themselves are freed later in sis900_remove().)
 * Always returns 0.
 */
static int sis900_close(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct pci_dev *pdev = sis_priv->pci_dev;
	void __iomem *ioaddr = sis_priv->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(net_dev);

	/* Disable interrupts by clearing the interrupt mask. */
	sw32(imr, 0x0000);
	sw32(ier, 0x0000);

	/* Stop the chip's Tx and Rx Status Machine */
	sw32(cr, RxDIS | TxDIS | sr32(cr));

	del_timer(&sis_priv->timer);

	free_irq(pdev->irq, net_dev);

	/* Free Rx and Tx skbuffs */
	for (i = 0; i < NUM_RX_DESC; i++) {
		skb = sis_priv->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			sis_priv->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < NUM_TX_DESC; i++) {
		skb = sis_priv->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			sis_priv->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007static void sis900_get_drvinfo(struct net_device *net_dev,
2008 struct ethtool_drvinfo *info)
2009{
2010 struct sis900_private *sis_priv = netdev_priv(net_dev);
2011
2012 strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
2013 strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
2014 strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
2015 sizeof(info->bus_info));
2016}
2017
2018static u32 sis900_get_msglevel(struct net_device *net_dev)
2019{
2020 struct sis900_private *sis_priv = netdev_priv(net_dev);
2021 return sis_priv->msg_enable;
2022}
2023
2024static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
2025{
2026 struct sis900_private *sis_priv = netdev_priv(net_dev);
2027 sis_priv->msg_enable = value;
2028}
2029
2030static u32 sis900_get_link(struct net_device *net_dev)
2031{
2032 struct sis900_private *sis_priv = netdev_priv(net_dev);
2033 return mii_link_ok(&sis_priv->mii_info);
2034}
2035
2036static int sis900_get_link_ksettings(struct net_device *net_dev,
2037 struct ethtool_link_ksettings *cmd)
2038{
2039 struct sis900_private *sis_priv = netdev_priv(net_dev);
2040 spin_lock_irq(&sis_priv->lock);
2041 mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
2042 spin_unlock_irq(&sis_priv->lock);
2043 return 0;
2044}
2045
2046static int sis900_set_link_ksettings(struct net_device *net_dev,
2047 const struct ethtool_link_ksettings *cmd)
2048{
2049 struct sis900_private *sis_priv = netdev_priv(net_dev);
2050 int rt;
2051 spin_lock_irq(&sis_priv->lock);
2052 rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
2053 spin_unlock_irq(&sis_priv->lock);
2054 return rt;
2055}
2056
2057static int sis900_nway_reset(struct net_device *net_dev)
2058{
2059 struct sis900_private *sis_priv = netdev_priv(net_dev);
2060 return mii_nway_restart(&sis_priv->mii_info);
2061}
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
/* sis900_set_wol - configure Wake-on-LAN.
 * @net_dev: the net device
 * @wol: requested wake-up options
 *
 * Only WAKE_MAGIC and WAKE_PHY are supported; requesting any other
 * option returns -EINVAL.  wolopts == 0 disables WoL entirely by
 * clearing the chip's pmctrl register and PME_EN in PCI config space;
 * otherwise the selected wake events and PME_EN are enabled.
 */
static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 cfgpmcsr = 0, pmctrl_bits = 0;

	if (wol->wolopts == 0) {
		/* disable WoL: clear PME_EN and all wake events */
		pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
		cfgpmcsr &= ~PME_EN;
		pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
		sw32(pmctrl, pmctrl_bits);
		if (netif_msg_wol(sis_priv))
			printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
		return 0;
	}

	/* reject wake options the hardware cannot signal */
	if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
				| WAKE_BCAST | WAKE_ARP))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC)
		pmctrl_bits |= MAGICPKT;
	if (wol->wolopts & WAKE_PHY)
		pmctrl_bits |= LINKON;

	sw32(pmctrl, pmctrl_bits);

	/* arm PME in the PCI power-management config register */
	pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
	cfgpmcsr |= PME_EN;
	pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
	if (netif_msg_wol(sis_priv))
		printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);

	return 0;
}
2109
2110static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2111{
2112 struct sis900_private *sp = netdev_priv(net_dev);
2113 void __iomem *ioaddr = sp->ioaddr;
2114 u32 pmctrl_bits;
2115
2116 pmctrl_bits = sr32(pmctrl);
2117 if (pmctrl_bits & MAGICPKT)
2118 wol->wolopts |= WAKE_MAGIC;
2119 if (pmctrl_bits & LINKON)
2120 wol->wolopts |= WAKE_PHY;
2121
2122 wol->supported = (WAKE_PHY | WAKE_MAGIC);
2123}
2124
/* ethtool operations supported by this driver */
static const struct ethtool_ops sis900_ethtool_ops = {
	.get_drvinfo = sis900_get_drvinfo,
	.get_msglevel = sis900_get_msglevel,
	.set_msglevel = sis900_set_msglevel,
	.get_link = sis900_get_link,
	.nway_reset = sis900_nway_reset,
	.get_wol = sis900_get_wol,
	.set_wol = sis900_set_wol,
	.get_link_ksettings = sis900_get_link_ksettings,
	.set_link_ksettings = sis900_set_link_ksettings,
};
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2147{
2148 struct sis900_private *sis_priv = netdev_priv(net_dev);
2149 struct mii_ioctl_data *data = if_mii(rq);
2150
2151 switch(cmd) {
2152 case SIOCGMIIPHY:
2153 data->phy_id = sis_priv->mii->phy_addr;
2154
2155
2156 case SIOCGMIIREG:
2157 data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2158 return 0;
2159
2160 case SIOCSMIIREG:
2161 mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
2162 return 0;
2163 default:
2164 return -EOPNOTSUPP;
2165 }
2166}
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
/* sis900_set_config - change the media port via an ifmap request.
 * @dev: the net device
 * @map: requested interface map (only map->port is honoured)
 *
 * IF_PORT_UNKNOWN enables and restarts autonegotiation; 10BASET forces
 * 10 Mbit/s with autonegotiation off; 100BASET(X) forces 100 Mbit/s.
 * AUI, 10BASE2 and 100BASEFX are not supported (-EOPNOTSUPP); any
 * other port value is -EINVAL.  Returns 0 on success or no-op.
 */
static int sis900_set_config(struct net_device *dev, struct ifmap *map)
{
	struct sis900_private *sis_priv = netdev_priv(dev);
	struct mii_phy *mii_phy = sis_priv->mii;

	u16 status;

	/* only act when a port was given and it actually changes */
	if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
		switch(map->port){
		case IF_PORT_UNKNOWN: /* use autonegotiation */
			dev->if_port = map->port;
			/* the media type is changing, so the link goes
			 * down; signal that before reprogramming */
			netif_carrier_off(dev);

			/* read current control bits so we only modify
			 * the autonegotiation flags */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);

			/* enable and restart autonegotiation */
			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);

			break;

		case IF_PORT_10BASET: /* force 10BaseT */
			dev->if_port = map->port;

			/* link will drop while switching media */
			netif_carrier_off(dev);

			/* read current control bits, then clear the
			 * speed and autonegotiation flags */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);

			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, status & ~(MII_CNTL_SPEED |
					MII_CNTL_AUTO));
			break;

		case IF_PORT_100BASET: /* force 100BaseT */
		case IF_PORT_100BASETX:
			dev->if_port = map->port;

			/* link will drop while switching media */
			netif_carrier_off(dev);

			/* NOTE(review): clearing MII_CNTL_SPEED before
			 * OR-ing it back in is redundant ((x & ~B) | B
			 * == x | B); MII_CNTL_AUTO is not cleared here —
			 * presumably intentional, confirm. */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, (status & ~MII_CNTL_SPEED) |
				   MII_CNTL_SPEED);

			break;

		case IF_PORT_10BASE2: /* 10Base2 */
		case IF_PORT_AUI: /* AUI */
		case IF_PORT_100BASEFX: /* 100BaseFx */
			/* media types not supported by this hardware */
			return -EOPNOTSUPP;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2279{
2280
2281 u32 crc = ether_crc(6, addr);
2282
2283
2284 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
2285 return (int)(crc >> 24);
2286 else
2287 return (int)(crc >> 25);
2288}
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
/* set_rx_mode - program the receive filter from IFF_* flags and the
 * multicast list.
 * @net_dev: the net device
 *
 * Selects promiscuous, accept-all-multicast, or a CRC-hashed multicast
 * table (128 or 256 bits depending on chip revision), writes it to the
 * chip, and mirrors IFF_LOOPBACK into the MAC loopback configuration.
 */
static void set_rx_mode(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 mc_filter[16] = {0};	/* up to 256-bit multicast hash table */
	int i, table_entries;
	u32 rx_mode;

	/* 635A+/900B: 16 x 16-bit hash words; others: 8 words */
	if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
	   (sis_priv->chipset_rev == SIS900B_900_REV))
		table_entries = 16;
	else
		table_entries = 8;

	if (net_dev->flags & IFF_PROMISC) {
		/* accept everything */
		rx_mode = RFPromiscuous;
		for (i = 0; i < table_entries; i++)
			mc_filter[i] = 0xffff;
	} else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
		   (net_dev->flags & IFF_ALLMULTI)) {
		/* too many multicast addresses, or all-multicast requested */
		rx_mode = RFAAB | RFAAM;
		for (i = 0; i < table_entries; i++)
			mc_filter[i] = 0xffff;
	} else {
		/* accept broadcast and hashed multicast addresses only */
		struct netdev_hw_addr *ha;
		rx_mode = RFAAB;

		netdev_for_each_mc_addr(ha, net_dev) {
			unsigned int bit_nr;

			bit_nr = sis900_mcast_bitnr(ha->addr,
						    sis_priv->chipset_rev);
			mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
		}
	}

	/* update the multicast hash table in the receive filter */
	for (i = 0; i < table_entries; i++) {
		/* the hash-table rows start at filter address 0x04 */
		sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
		sw32(rfdr, mc_filter[i]);
	}

	sw32(rfcr, RFEN | rx_mode);

	/* the chip can loop packets back at MAC level; Tx/Rx must be
	 * disabled while the loopback bits are changed */
	if (net_dev->flags & IFF_LOOPBACK) {
		u32 cr_saved;

		cr_saved = sr32(cr);
		sw32(cr, cr_saved | TxDIS | RxDIS);
		/* enable loopback */
		sw32(txcfg, sr32(txcfg) | TxMLB);
		sw32(rxcfg, sr32(rxcfg) | RxATX);
		/* restore the command register */
		sw32(cr, cr_saved);
	}
}
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
/* sis900_reset - soft-reset the SiS 900.
 * @net_dev: the net device to reset
 *
 * Masks interrupts, clears the receive filter, issues a combined
 * Rx/Tx/global reset, then polls the interrupt status register (at
 * most 1000 reads) until both TxRCMP and RxRCMP have signalled reset
 * completion.  Finally restores cfg, enabling the random backoff
 * counter on 635A+/900B revisions.
 */
static void sis900_reset(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 status = TxRCMP | RxRCMP;
	int i;

	sw32(ier, 0);
	sw32(imr, 0);
	sw32(rfcr, 0);

	sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));

	/* each completion bit observed in isr is XOR-ed out of the
	 * wait mask; the loop ends when both bits have been seen */
	for (i = 0; status && (i < 1000); i++)
		status ^= sr32(isr) & status;

	if (sis_priv->chipset_rev >= SIS635A_900_REV ||
	    sis_priv->chipset_rev == SIS900B_900_REV)
		sw32(cfg, PESEL | RND_CNT);
	else
		sw32(cfg, PESEL);
}
2397
2398
2399
2400
2401
2402
2403
2404
/* sis900_remove - tear down a device on driver unload or hot-unplug.
 * @pci_dev: the PCI device being removed
 *
 * Unregisters the net device first (so no new activity can start),
 * then frees the discovered-PHY list, the DMA descriptor rings, the
 * mapped register window, the netdev itself, and the PCI regions.
 */
static void sis900_remove(struct pci_dev *pci_dev)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	struct sis900_private *sis_priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	/* free the linked list of PHYs built at probe time */
	while (sis_priv->first_mii) {
		struct mii_phy *phy = sis_priv->first_mii;

		sis_priv->first_mii = phy->next;
		kfree(phy);
	}

	pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
		sis_priv->rx_ring_dma);
	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
		sis_priv->tx_ring_dma);
	pci_iounmap(pci_dev, sis_priv->ioaddr);
	free_netdev(net_dev);
	pci_release_regions(pci_dev);
}
2427
2428#ifdef CONFIG_PM
2429
2430static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2431{
2432 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2433 struct sis900_private *sis_priv = netdev_priv(net_dev);
2434 void __iomem *ioaddr = sis_priv->ioaddr;
2435
2436 if(!netif_running(net_dev))
2437 return 0;
2438
2439 netif_stop_queue(net_dev);
2440 netif_device_detach(net_dev);
2441
2442
2443 sw32(cr, RxDIS | TxDIS | sr32(cr));
2444
2445 pci_set_power_state(pci_dev, PCI_D3hot);
2446 pci_save_state(pci_dev);
2447
2448 return 0;
2449}
2450
/* sis900_resume - restore device state after a system suspend.
 * @pci_dev: the PCI device to resume
 *
 * Restores PCI config space and D0 power state, reprograms the receive
 * filter and both descriptor rings, re-attaches the netdev, programs a
 * conservative 10 Mbit/s half-duplex MAC mode, re-enables the receiver
 * and interrupts, and re-runs media detection to settle the real mode.
 * No-op when the interface was not running at suspend time.
 */
static int sis900_resume(struct pci_dev *pci_dev)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;

	if(!netif_running(net_dev))
		return 0;
	pci_restore_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D0);

	sis900_init_rxfilter(net_dev);

	sis900_init_tx_ring(net_dev);
	sis900_init_rx_ring(net_dev);

	set_rx_mode(net_dev);

	netif_device_attach(net_dev);
	netif_start_queue(net_dev);

	/* conservative default until sis900_check_mode() below resolves
	 * the actual negotiated mode */
	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);

	/* Enable all known interrupts by setting the interrupt mask. */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
	sw32(cr, RxENA | sr32(cr));
	sw32(ier, IE);

	sis900_check_mode(net_dev, sis_priv->mii);

	return 0;
}
2484#endif
2485
/* PCI driver glue: device table, probe/remove, and legacy
 * suspend/resume hooks when CONFIG_PM is enabled. */
static struct pci_driver sis900_pci_driver = {
	.name = SIS900_MODULE_NAME,
	.id_table = sis900_pci_tbl,
	.probe = sis900_probe,
	.remove = sis900_remove,
#ifdef CONFIG_PM
	.suspend = sis900_suspend,
	.resume = sis900_resume,
#endif
};
2496
2497static int __init sis900_init_module(void)
2498{
2499
2500#ifdef MODULE
2501 printk(version);
2502#endif
2503
2504 return pci_register_driver(&sis900_pci_driver);
2505}
2506
/* Module unload: unregister the PCI driver, which invokes
 * sis900_remove() for every bound device. */
static void __exit sis900_cleanup_module(void)
{
	pci_unregister_driver(&sis900_pci_driver);
}
2511
/* module entry and exit points */
module_init(sis900_init_module);
module_exit(sis900_cleanup_module);
2514
2515