1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
/* Conditional logging helpers: emit printk only when the matching
 * netif_msg_* level is enabled in tp->msg_enable. */
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

/* MII bus limits and wildcard values used while scanning for PHYs. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.3"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX			DRV_NAME ": "

/* Non-NAPI receive path: hand each skb straight to netif_rx and ignore
 * any polling quota. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

/* Ring geometry.  Descriptor rings are allocated as one coherent DMA
 * block each; RX_BUF_MASK keeps buffer sizes 8-byte aligned as the
 * hardware requires (low 3 bits of the size field are reserved). */
#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register (GMIIControl) bit layout. */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* MMIO accessors; 'ioaddr' must be in scope at every use site. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Read back a register to flush posted PCI writes to the device. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
92
/* MMIO register offsets (byte offsets from the mapped BAR). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	/* reserved */
	TxSts			= 0x0c,
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	/* reserved */
	RxSts			= 0x1c,
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,
	PMControl		= 0x30,
	rsv2			= 0x34,	/* reserved */
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,
	GIoCtrl			= 0x4c,
	TxMacControl		= 0x50,
	TxLimit			= 0x54,
	RGDelay			= 0x58,
	rsv3			= 0x5c,	/* reserved */
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	/* 0x6c..0x6f: second half of the 64-bit multicast hash table */
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,
	RxMPSControl		= 0x78,
	rsv4			= 0x7c,	/* reserved */
};
127
/* Bit values for the registers above, grouped by register. */
enum sis190_register_content {
	/* IntrStatus / IntrMask bits */
	SoftInt			= 0x40000000,
	Timeup			= 0x20000000,
	PauseFrame		= 0x00080000,
	MagicPacket		= 0x00040000,
	WakeupFrame		= 0x00020000,
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,
	Cfg9346_Unlock		= 0xc0,

	/* RxMacControl accept-filter bits */
	AcceptErr		= 0x20,
	AcceptRunt		= 0x10,
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,

	/* StationControl link bits */
	LinkStatus		= 0x02,
	FullDup			= 0x01,

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,
};
177
/* Hardware Tx descriptor; all fields little-endian as seen by the NIC.
 * 'status' carries the OWN bit plus per-packet control/result flags. */
struct TxDesc {
	__le32 PSize;	/* packet size */
	__le32 status;	/* OWNbit + command/completion bits */
	__le32 addr;	/* DMA address of the buffer */
	__le32 size;	/* buffer size, low bits reserved; RingEnd flag */
};
184
/* Hardware Rx descriptor, same layout as TxDesc.  On completion the
 * chip writes the received size and status bits into PSize. */
struct RxDesc {
	__le32 PSize;	/* written back by hw: status + packet size */
	__le32 status;	/* OWNbit / INTbit */
	__le32 addr;	/* DMA address of the receive buffer */
	__le32 size;	/* buffer size; RingEnd marks the last descriptor */
};
191
/* Descriptor bit definitions, shared and per-direction. */
enum _DescStatusBit {
	/* Common to both rings */
	OWNbit		= 0x80000000,	/* descriptor owned by the NIC */
	INTbit		= 0x40000000,	/* raise interrupt on completion */
	CRCbit		= 0x00020000,	/* append CRC (Tx) */
	PADbit		= 0x00010000,	/* pad short frames (Tx) */

	/* In desc->size: marks the last descriptor of a ring */
	RingEnd		= 0x80000000,

	/* Tx command bits (desc->status before transmit) */
	LSEN		= 0x08000000,
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,

	/* Tx completion status bits */
	WND		= 0x00080000,	/* late collision window */
	TABRT		= 0x00040000,	/* transmit abort */
	FIFO		= 0x00020000,	/* Tx FIFO underrun */
	LINK		= 0x00010000,	/* carrier lost */
	ColCountMask	= 0x0000ffff,

	/* Rx status bits (desc->PSize write-back) */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,	/* two-bit destination-class field */
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,

	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	/* multi-descriptor packet count */
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues.
	 */
};
250
/* ROMInterface register bits used to drive the serial EEPROM. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	/* chip select */
	EECLK	= 0x00000002,	/* serial clock */
	EEDO	= 0x00000008,	/* data out */
	EEDI	= 0x00000004,	/* data in */
	EEREQ	= 0x00000080,	/* access request; cleared when done */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	/* write operation */
};
260
261
/* EEPROM word addresses (each entry is a 16-bit word). */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,	/* non-zero signature => EEPROM present */
	EEPROMCLK	= 0x01,
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03	/* MAC address starts here (3 words) */
};
268
/* Per-board feature flags kept in tp->features. */
enum sis190_feature {
	F_HAS_RGMII	= 1,	/* RGMII MAC/PHY interface */
	F_PHY_88E1111	= 2,	/* Marvell 88E1111 needs a clock fixup */
	F_PHY_BCM5461	= 4	/* Broadcom BCM5461 needs RGMII tuning */
};
274
/* Per-device driver state, hung off the net_device private area. */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped register BAR */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* guards register rx-mode updates */
	u32 rx_buf_sz;			/* current Rx buffer size (8-aligned) */
	u32 cur_rx;			/* next Rx descriptor to reap */
	u32 cur_tx;			/* next free Tx descriptor */
	u32 dirty_rx;			/* first Rx slot awaiting refill */
	u32 dirty_tx;			/* first Tx slot awaiting completion */
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* skb per Rx slot */
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* skb per Tx slot */
	struct work_struct phy_task;	/* deferred link (re)negotiation */
	struct timer_list timer;	/* periodic PHY poll */
	u32 msg_enable;			/* netif_msg_* verbosity mask */
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* discovered PHYs (sis190_phy) */
	u32 features;			/* enum sis190_feature flags */
};
298
/* One PHY discovered during the MII bus scan. */
struct sis190_phy {
	struct list_head list;	/* linked on tp->first_phy */
	int phy_id;		/* MII bus address */
	u16 id[2];		/* PHYSID1/PHYSID2 */
	u16 status;		/* BMSR snapshot at probe time */
	u8  type;		/* enum sis190_phy_type */
};
306
/* Transceiver classification used when picking a default PHY. */
enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,	/* HomePNA */
	LAN	= 0x02,	/* Ethernet */
	MIX	= 0x03	/* dual-mode; resolved via BMSR at probe */
};
313
/* Known PHY OUI/model table; id[1] is matched with the revision nibble
 * masked off (& 0xfff0).  A zero 'type' terminates the table. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];		/* PHYSID1, PHYSID2 (revision masked) */
	unsigned int type;	/* enum sis190_phy_type */
	u32 feature;		/* extra sis190_feature flags to set */
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};
329
/* Human-readable board names, indexed by the driver_data field of
 * sis190_pci_tbl below. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
336
/* PCI IDs handled by this driver; driver_data indexes sis_chip_info. */
static struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};
342
MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

/* Frames shorter than this are copied into a fresh skb on receive so the
 * original DMA buffer can be immediately recycled. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };	/* -1: use SIS190_MSG_DEFAULT at probe time */

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Interrupt sources the driver unmasks while the interface is up. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chip can receive all multicast packets or use a 64-entry hash table.
 */
static const int multicast_filter_limit = 32;
368
369static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
370{
371 unsigned int i;
372
373 SIS_W32(GMIIControl, ctl);
374
375 msleep(1);
376
377 for (i = 0; i < 100; i++) {
378 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
379 break;
380 msleep(1);
381 }
382
383 if (i > 99)
384 printk(KERN_ERR PFX "PHY command failed !\n");
385}
386
387static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
388{
389 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
390 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
391 (((u32) val) << EhnMIIdataShift));
392}
393
394static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
395{
396 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
397 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
398
399 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
400}
401
402static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
403{
404 struct sis190_private *tp = netdev_priv(dev);
405
406 mdio_write(tp->mmio_addr, phy_id, reg, val);
407}
408
409static int __mdio_read(struct net_device *dev, int phy_id, int reg)
410{
411 struct sis190_private *tp = netdev_priv(dev);
412
413 return mdio_read(tp->mmio_addr, phy_id, reg);
414}
415
416static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
417{
418 mdio_read(ioaddr, phy_id, reg);
419 return mdio_read(ioaddr, phy_id, reg);
420}
421
/*
 * Read one 16-bit word from the serial EEPROM at word address @reg.
 * Returns 0 if no EEPROM is attached, 0xffff if the chip never
 * completes the request within ~200 ms.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	/* Bit 1 of ROMControl signals EEPROM presence — assumption from
	 * usage here; TODO confirm against the datasheet. */
	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	/* The chip clears EEREQ when the read completes; the result sits
	 * in the upper 16 bits of ROMInterface. */
	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
442
/*
 * Mask all interrupt sources, acknowledge anything pending, then flush
 * the posted writes so the device has really quiesced before returning.
 */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);	/* write-1-to-clear */
	SIS_PCI_COMMIT();
}
449
/* Stop both DMA engines and silence the chip's interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes.  0x1a00 keeps the
	 * non-enable configuration bits the driver programs elsewhere. */
	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
459
/* Flag @desc as the final ring entry so the NIC wraps back to entry 0. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
464
/*
 * Re-arm an Rx descriptor for the NIC.  The size/PSize fields must be
 * visible before the OWN bit is set, hence the wmb() between them.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;	/* preserve wrap flag */

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();	/* descriptor fields before ownership transfer */
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
474
/* Attach a freshly DMA-mapped buffer to @desc and hand it to the NIC. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
481
/*
 * Neutralize an Rx descriptor that has no buffer: poison the address,
 * zero the size (keeping only the RingEnd wrap flag) and clear OWN so
 * the NIC will skip it.  wmb() orders the field writes before the
 * ownership change.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);	/* poison: never DMA here */
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
490
491static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
492 struct RxDesc *desc)
493{
494 u32 rx_buf_sz = tp->rx_buf_sz;
495 struct sk_buff *skb;
496
497 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
498 if (likely(skb)) {
499 dma_addr_t mapping;
500
501 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
502 PCI_DMA_FROMDEVICE);
503 sis190_map_to_asic(desc, mapping, rx_buf_sz);
504 } else
505 sis190_make_unusable_by_asic(desc);
506
507 return skb;
508}
509
510static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
511 u32 start, u32 end)
512{
513 u32 cur;
514
515 for (cur = start; cur < end; cur++) {
516 unsigned int i = cur % NUM_RX_DESC;
517
518 if (tp->Rx_skbuff[i])
519 continue;
520
521 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
522
523 if (!tp->Rx_skbuff[i])
524 break;
525 }
526 return cur - start;
527}
528
529static bool sis190_try_rx_copy(struct sis190_private *tp,
530 struct sk_buff **sk_buff, int pkt_size,
531 dma_addr_t addr)
532{
533 struct sk_buff *skb;
534 bool done = false;
535
536 if (pkt_size >= rx_copybreak)
537 goto out;
538
539 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
540 if (!skb)
541 goto out;
542
543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
544 PCI_DMA_FROMDEVICE);
545 skb_reserve(skb, 2);
546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
547 *sk_buff = skb;
548 done = true;
549out:
550 return done;
551}
552
553static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
554{
555#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
556
557 if ((status & CRCOK) && !(status & ErrMask))
558 return 0;
559
560 if (!(status & CRCOK))
561 stats->rx_crc_errors++;
562 else if (status & OVRUN)
563 stats->rx_over_errors++;
564 else if (status & (SHORT | LIMIT))
565 stats->rx_length_errors++;
566 else if (status & (MIIER | NIBON | COLON))
567 stats->rx_frame_errors++;
568
569 stats->rx_errors++;
570 return -1;
571}
572
/*
 * Reap completed Rx descriptors: validate each frame, deliver it up the
 * stack (copying small frames, detaching large ones), then refill the
 * ring.  Returns the number of descriptors processed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	/* Descriptors with buffers but not yet reaped. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;	/* still owned by the NIC */

		/* The chip writes frame status + length into PSize. */
		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);
		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			/* Strip the 4-byte FCS from the reported length. */
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			/* pkt_size is signed: a bogus tiny length goes
			 * negative and, compared against the unsigned
			 * rx_buf_sz, lands in this drop path too. */
			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Copied out: sync the buffer back to the
				 * device and keep it on the ring. */
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Hand the whole buffer up; the slot will be
				 * refilled below by sis190_rx_fill. */
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish the slots emptied above. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
650
651static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
652 struct TxDesc *desc)
653{
654 unsigned int len;
655
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
657
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
659
660 memset(desc, 0x00, sizeof(*desc));
661}
662
663static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
664{
665#define TxErrMask (WND | TABRT | FIFO | LINK)
666
667 if (!unlikely(status & TxErrMask))
668 return 0;
669
670 if (status & WND)
671 stats->tx_window_errors++;
672 if (status & TABRT)
673 stats->tx_aborted_errors++;
674 if (status & FIFO)
675 stats->tx_fifo_errors++;
676 if (status & LINK)
677 stats->tx_carrier_errors++;
678
679 stats->tx_errors++;
680
681 return -1;
682}
683
/*
 * Reap completed Tx descriptors: account stats, unmap and free each
 * skb, and wake the queue if it was stopped because the ring was full.
 * Pairs with the smp_wmb()/smp_rmb() in sis190_start_xmit.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();	/* read cur_tx after the producer's smp_wmb() */
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);	/* ring was full */

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		if (status & OWNbit)
			break;	/* NIC hasn't finished this one yet */

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			/* Low 16 bits count transmission attempts; minus
			 * one gives the collision count. */
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();	/* publish dirty_tx before waking the queue */
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
728
729
730
731
732
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.  Returns IRQ_HANDLED only when the status register shows
 * the interrupt was ours (shared IRQ line).
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone (e.g. hot-unplug); 0: not our IRQ. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		/* Interface being torn down: just quiesce the chip. */
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);	/* ack everything we saw */

//	net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* Renegotiation is slow: defer to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
770
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools to
 * allow network I/O with interrupts disabled: run the handler by hand
 * with the device IRQ masked.
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
782
783static void sis190_free_rx_skb(struct sis190_private *tp,
784 struct sk_buff **sk_buff, struct RxDesc *desc)
785{
786 struct pci_dev *pdev = tp->pci_dev;
787
788 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 PCI_DMA_FROMDEVICE);
790 dev_kfree_skb(*sk_buff);
791 *sk_buff = NULL;
792 sis190_make_unusable_by_asic(desc);
793}
794
795static void sis190_rx_clear(struct sis190_private *tp)
796{
797 unsigned int i;
798
799 for (i = 0; i < NUM_RX_DESC; i++) {
800 if (!tp->Rx_skbuff[i])
801 continue;
802 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
803 }
804}
805
806static void sis190_init_ring_indexes(struct sis190_private *tp)
807{
808 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
809}
810
811static int sis190_init_ring(struct net_device *dev)
812{
813 struct sis190_private *tp = netdev_priv(dev);
814
815 sis190_init_ring_indexes(tp);
816
817 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
819
820 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 goto err_rx_clear;
822
823 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
824
825 return 0;
826
827err_rx_clear:
828 sis190_rx_clear(tp);
829 return -ENOMEM;
830}
831
/*
 * Program the Rx filter from dev->flags and the (legacy) dev->mc_list:
 * promiscuous, all-multicast, or a 64-bit CRC hash of the multicast
 * addresses.  Register writes are done under tp->lock.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 bits of the Ethernet CRC select one of 64
			 * hash-table bits. */
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* Bit 1 of RxMacControl is kept set — assumption: Rx MAC enable;
	 * TODO confirm against the datasheet. */
	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
873
/*
 * Soft-reset the chip: pulse the reset bit in IntrControl (flushing the
 * posted write in between), then stop DMA and mask interrupts.
 */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);	/* assert reset */
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);	/* deassert */
	sis190_asic_down(ioaddr);
}
881
/*
 * Bring the chip to an operational state: reset, program the ring base
 * addresses, clear stale state (interrupts, WoL, hash table), set the
 * Rx filter, unmask interrupts and start both DMA engines.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);	/* clear pending interrupts */
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);		/* second hash-table word */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
914
/*
 * Deferred link management (scheduled from the ISR and the PHY timer):
 * wait out a PHY reset, restart the timer while autonegotiation is
 * pending, and once it completes program StationControl for the
 * negotiated speed/duplex, apply PHY-specific RGMII fixups, and flip
 * the carrier state.  Runs under rtnl_lock to serialize with
 * open/close.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY still resetting: check again shortly. */
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		netif_carrier_off(dev);
		net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
			 dev->name);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Link resolved: map the negotiated ability to a
		 * StationControl value.  0x1000 is the full-duplex bit in
		 * these ctl words — assumption from the pairing below;
		 * TODO confirm against the datasheet. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
			 dev->name, val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* Partner advertised a next page and does NWAY:
			 * check the gigabit registers.  STAT1000 link-
			 * partner bits sit 2 above our CTRL1000 bits. */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			/* Highest common 10/100 ability; the terminator
			 * entry catches a no-match. */
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		/* Keep the bits of StationControl this table doesn't own. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			/* Set Tx Delay in RGMII mode */
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
1016
1017static void sis190_phy_timer(unsigned long __opaque)
1018{
1019 struct net_device *dev = (struct net_device *)__opaque;
1020 struct sis190_private *tp = netdev_priv(dev);
1021
1022 if (likely(netif_running(dev)))
1023 schedule_work(&tp->phy_task);
1024}
1025
/* Stop the PHY poll timer, waiting for a running handler to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
1032
/* Arm the periodic PHY poll timer (legacy init_timer-style setup). */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;	/* handler recovers dev */
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1044
1045static void sis190_set_rxbufsize(struct sis190_private *tp,
1046 struct net_device *dev)
1047{
1048 unsigned int mtu = dev->mtu;
1049
1050 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1051
1052 if (tp->rx_buf_sz & 0x07) {
1053 tp->rx_buf_sz += 8;
1054 tp->rx_buf_sz &= RX_BUF_MASK;
1055 }
1056}
1057
/*
 * net_device open: allocate both descriptor rings (coherent DMA),
 * populate the Rx ring, start the PHY timer, grab the IRQ and start
 * the hardware.  Uses the classic goto-unwind pattern on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);	/* undo sis190_init_ring's buffers */
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}
1103
1104static void sis190_tx_clear(struct sis190_private *tp)
1105{
1106 unsigned int i;
1107
1108 for (i = 0; i < NUM_TX_DESC; i++) {
1109 struct sk_buff *skb = tp->Tx_skbuff[i];
1110
1111 if (!skb)
1112 continue;
1113
1114 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1115 tp->Tx_skbuff[i] = NULL;
1116 dev_kfree_skb(skb);
1117
1118 tp->dev->stats.tx_dropped++;
1119 }
1120 tp->cur_tx = tp->dirty_tx = 0;
1121}
1122
/*
 * Quiesce the interface: stop the PHY timer and the queue, then loop
 * shutting the ASIC down and synchronizing against in-flight interrupt
 * handlers until the interrupt mask reads back clear (an ISR running
 * concurrently could re-enable it).  Finally drop all ring buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		/* Wait for any scheduled softirq/poll work to drain. */
		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1152
1153static int sis190_close(struct net_device *dev)
1154{
1155 struct sis190_private *tp = netdev_priv(dev);
1156 struct pci_dev *pdev = tp->pci_dev;
1157
1158 sis190_down(dev);
1159
1160 free_irq(dev->irq, dev);
1161
1162 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1163 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1164
1165 tp->TxDescRing = NULL;
1166 tp->RxDescRing = NULL;
1167
1168 return 0;
1169}
1170
/*
 * Queue one skb for transmission: pad runts, map the buffer, fill the
 * next descriptor, hand ownership to the NIC and kick the Tx engine.
 * Stops the queue when the ring fills; sis190_tx_interrupt wakes it.
 */
static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			/* skb already freed by skb_padto on failure. */
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		/* Should never happen: the queue is stopped before the
		 * ring fills (see below). */
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();	/* descriptor fields before ownership transfer */

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();	/* publish cur_tx before the doorbell / reaper */

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);	/* Tx poll */

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		/* Ring now full: stop, then re-check against a reaper that
		 * may have advanced dirty_tx concurrently. */
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1232
/* Free every sis190_phy node discovered during the MII scan. */
static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	/* _safe: each node is freed while iterating. */
	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}
1241
1242
1243
1244
1245
1246
1247
1248
1249
/*
 * Select a default transceiver among the detected PHYs: prefer the
 * first known-type PHY with link, else a HOME PHY, else a LAN PHY,
 * else simply the first one found.  Non-selected PHYs are isolated.
 * Returns the chosen PHY's (latched) BMSR value.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Isolate every PHY we are not going to use. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		/* No linked PHY: fall back by type preference. */
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
		       "%s: Using transceiver at address %d as default.\n",
		       pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* Un-isolate the chosen PHY (it may have been isolated above on
	 * a previous pass). */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1304
/*
 * Fill in a sis190_phy record for the PHY at @phy_id: read its ID
 * registers, look it up in mii_chip_table (revision nibble masked) and
 * record its type and any board feature flags.  Unrecognized PHYs are
 * kept with type UNKNOWN.
 */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		/* MIX PHYs are classified by their 100 Mb ability. */
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
		net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
			  pci_name(tp->pci_dev), p->name, phy_id);
	} else {
		phy->type = UNKNOWN;
		net_probe(tp, KERN_INFO
			  "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
			  pci_name(tp->pci_dev),
			  phy->id[0], (phy->id[1] & 0xfff0), phy_id);
	}
}
1341
1342static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1343{
1344 if (tp->features & F_PHY_88E1111) {
1345 void __iomem *ioaddr = tp->mmio_addr;
1346 int phy_id = tp->mii_if.phy_id;
1347 u16 reg[2][2] = {
1348 { 0x808b, 0x0ce1 },
1349 { 0x808f, 0x0c60 }
1350 }, *p;
1351
1352 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1353
1354 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1355 udelay(200);
1356 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1357 udelay(200);
1358 }
1359}
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369static int __devinit sis190_mii_probe(struct net_device *dev)
1370{
1371 struct sis190_private *tp = netdev_priv(dev);
1372 struct mii_if_info *mii_if = &tp->mii_if;
1373 void __iomem *ioaddr = tp->mmio_addr;
1374 int phy_id;
1375 int rc = 0;
1376
1377 INIT_LIST_HEAD(&tp->first_phy);
1378
1379 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1380 struct sis190_phy *phy;
1381 u16 status;
1382
1383 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1384
1385
1386 if (status == 0xffff || status == 0x0000)
1387 continue;
1388
1389 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1390 if (!phy) {
1391 sis190_free_phy(&tp->first_phy);
1392 rc = -ENOMEM;
1393 goto out;
1394 }
1395
1396 sis190_init_phy(dev, tp, phy, phy_id, status);
1397
1398 list_add(&tp->first_phy, &phy->list);
1399 }
1400
1401 if (list_empty(&tp->first_phy)) {
1402 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1403 pci_name(tp->pci_dev));
1404 rc = -EIO;
1405 goto out;
1406 }
1407
1408
1409 sis190_default_phy(dev);
1410
1411 sis190_mii_probe_88e1111_fixup(tp);
1412
1413 mii_if->dev = dev;
1414 mii_if->mdio_read = __mdio_read;
1415 mii_if->mdio_write = __mdio_write;
1416 mii_if->phy_id_mask = PHY_ID_ANY;
1417 mii_if->reg_num_mask = MII_REG_ANY;
1418out:
1419 return rc;
1420}
1421
1422static void sis190_mii_remove(struct net_device *dev)
1423{
1424 struct sis190_private *tp = netdev_priv(dev);
1425
1426 sis190_free_phy(&tp->first_phy);
1427}
1428
/*
 * Undo everything sis190_init_board() acquired, in reverse order:
 * unmap the MMIO window, release the PCI regions, disable the device
 * and free the netdev (which also frees the embedded sis190_private).
 */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1439
/*
 * Allocate the netdev, enable the PCI device, sanity-check and claim
 * BAR 0, configure 32-bit DMA, map the register window and quiesce the
 * chip (mask/ack interrupts, soft reset).
 *
 * Returns the netdev on success or an ERR_PTR() on failure; the error
 * path releases everything acquired so far in reverse order.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory resource covering our register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* 32-bit DMA only. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Silence the chip before anything else can poke it. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1522
/*
 * Watchdog handler: the Tx path stalled.  Stop the transmitter, dump
 * status, clear the Tx ring under the lock and restart the hardware.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already done. */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	/* Log the register state before we tear anything down. */
	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Keep the irq handler off the ring while we drop its buffers. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and restart the whole thing. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1551
1552static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1553{
1554 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1555}
1556
/*
 * Read the permanent MAC address from the on-board EEPROM.
 *
 * Returns 0 on success, -EIO when the signature word reads back as
 * all-ones or all-zeroes (no/unreadable EEPROM).  On success the
 * address lands in dev->dev_addr and the RGMII feature flag is derived
 * from the EEPROM info word.
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check for a sane EEPROM signature first. */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Fetch the MAC address one little-endian 16-bit word at a time. */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1599 struct net_device *dev)
1600{
1601 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1602 struct sis190_private *tp = netdev_priv(dev);
1603 struct pci_dev *isa_bridge;
1604 u8 reg, tmp8;
1605 unsigned int i;
1606
1607 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1608 pci_name(pdev));
1609
1610 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1611 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1612 if (isa_bridge)
1613 break;
1614 }
1615
1616 if (!isa_bridge) {
1617 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1618 pci_name(pdev));
1619 return -EIO;
1620 }
1621
1622
1623 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1624 reg = (tmp8 & ~0x02);
1625 pci_write_config_byte(isa_bridge, 0x48, reg);
1626 udelay(50);
1627 pci_read_config_byte(isa_bridge, 0x48, ®);
1628
1629 for (i = 0; i < MAC_ADDR_LEN; i++) {
1630 outb(0x9 + i, 0x78);
1631 dev->dev_addr[i] = inb(0x79);
1632 }
1633
1634 outb(0x12, 0x78);
1635 reg = inb(0x79);
1636
1637 sis190_set_rgmii(tp, reg);
1638
1639
1640 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1641 pci_dev_put(isa_bridge);
1642
1643 return 0;
1644}
1645
1646
1647
1648
1649
1650
1651
1652
/*
 * Program the chip's unicast filter with dev->dev_addr.
 *
 * The filter-control bits (0x0f00 of RxMacControl) are cleared while
 * the six address bytes are written, then the previous control value
 * is restored and the writes are posted.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);

	/* Disable packet filtering before changing the filter. */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original control value and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1674
1675static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1676 struct net_device *dev)
1677{
1678 int rc;
1679
1680 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1681 if (rc < 0) {
1682 u8 reg;
1683
1684 pci_read_config_byte(pdev, 0x73, ®);
1685
1686 if (reg & 0x00000001)
1687 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1688 }
1689 return rc;
1690}
1691
/*
 * Kick off autonegotiation on the current PHY, advertising all four
 * 10/100 modes plus 1000BASE-T full duplex.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	/* Keep the selector field, advertise every 10/100 ability. */
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	/* Gigabit: advertise 1000BASE-T full duplex only. */
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	/* Reset the PHY and (re)start autonegotiation. */
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1716
1717static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1718{
1719 struct sis190_private *tp = netdev_priv(dev);
1720
1721 return mii_ethtool_gset(&tp->mii_if, cmd);
1722}
1723
1724static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1725{
1726 struct sis190_private *tp = netdev_priv(dev);
1727
1728 return mii_ethtool_sset(&tp->mii_if, cmd);
1729}
1730
1731static void sis190_get_drvinfo(struct net_device *dev,
1732 struct ethtool_drvinfo *info)
1733{
1734 struct sis190_private *tp = netdev_priv(dev);
1735
1736 strcpy(info->driver, DRV_NAME);
1737 strcpy(info->version, DRV_VERSION);
1738 strcpy(info->bus_info, pci_name(tp->pci_dev));
1739}
1740
/* ethtool get_regs_len: size of the dump produced by sis190_get_regs(). */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1745
/*
 * ethtool get_regs: copy the MMIO register window into @p.  The chip
 * lock is held so the snapshot is internally consistent.
 */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	/* Never copy beyond the window we mapped. */
	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1759
1760static int sis190_nway_reset(struct net_device *dev)
1761{
1762 struct sis190_private *tp = netdev_priv(dev);
1763
1764 return mii_nway_restart(&tp->mii_if);
1765}
1766
1767static u32 sis190_get_msglevel(struct net_device *dev)
1768{
1769 struct sis190_private *tp = netdev_priv(dev);
1770
1771 return tp->msg_enable;
1772}
1773
1774static void sis190_set_msglevel(struct net_device *dev, u32 value)
1775{
1776 struct sis190_private *tp = netdev_priv(dev);
1777
1778 tp->msg_enable = value;
1779}
1780
/* ethtool entry points; link state comes from the generic helper. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1792
1793static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1794{
1795 struct sis190_private *tp = netdev_priv(dev);
1796
1797 return !netif_running(dev) ? -EINVAL :
1798 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1799}
1800
/* Net device callbacks; address/MTU handling uses the ethernet generics. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list = sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};
1815
/*
 * PCI probe: set up the board, read the MAC address, wire up the
 * netdev/ethtool callbacks, probe the MII bus and register the netdev.
 * Each failure point unwinds everything acquired before it.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Print the banner once, on whichever device probes first. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* Poison value: the chip is driven through MMIO, not I/O ports. */
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq, dev->dev_addr);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* No link yet; autoneg will bring it up. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1885
/* PCI remove: tear down everything sis190_init_one() set up. */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	/*
	 * NOTE(review): phy_task is flushed before unregister_netdev();
	 * if the interface is still up, could the work be rescheduled
	 * after this flush?  Confirm the ordering is safe.
	 */
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1896
/* PCI glue: matches the ids in sis190_pci_tbl. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1903
/* Module entry point: register the PCI driver. */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}
1908
/* Module exit point: unregister the PCI driver. */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
1913
/* Hook the entry/exit functions into the module loader. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);
1916