/*
   sis190.c: SiS sis190/191 Gigabit Ethernet driver

   Copyright (c) K.M. Liu <kmliu@sis.com>
   Copyright (c) Francois Romieu <romieu@fr.zoreil.com>

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/irq.h>

#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.4"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

#define SIS_W8(reg, val)	writeb((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel((val), ioaddr + (reg))
#define SIS_R8(reg)		readb(ioaddr + (reg))
#define SIS_R16(reg)		readw(ioaddr + (reg))
#define SIS_R32(reg)		readl(ioaddr + (reg))

#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)

enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,
	TxSts			= 0x0c,
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,
	RxSts			= 0x1c,
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,
	PMControl		= 0x30,
	rsv2			= 0x34,
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,
	GIoCtrl			= 0x4c,
	TxMacControl		= 0x50,
	TxLimit			= 0x54,
	RGDelay			= 0x58,
	rsv3			= 0x5c,
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,

	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,
	RxMPSControl		= 0x78,
	rsv4			= 0x7c,
};

enum sis190_register_content {
	SoftInt			= 0x40000000,
	Timeup			= 0x20000000,
	PauseFrame		= 0x00080000,
	MagicPacket		= 0x00040000,
	WakeupFrame		= 0x00020000,
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,

	Cfg9346_Lock		= 0x00,
	Cfg9346_Unlock		= 0xc0,

	AcceptErr		= 0x20,
	AcceptRunt		= 0x10,
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,

	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,

	LinkStatus		= 0x02,
	FullDup			= 0x01,

	TBILinkOK		= 0x02000000,
};

struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

struct RxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

enum _DescStatusBit {
	OWNbit			= 0x80000000,
	INTbit			= 0x40000000,
	CRCbit			= 0x00020000,
	PADbit			= 0x00010000,

	RingEnd			= 0x80000000,

	LSEN			= 0x08000000,
	IPCS			= 0x04000000,
	TCPCS			= 0x02000000,
	UDPCS			= 0x01000000,
	BSTEN			= 0x00800000,
	EXTEN			= 0x00400000,
	DEFEN			= 0x00200000,
	BKFEN			= 0x00100000,
	CRSEN			= 0x00080000,
	COLEN			= 0x00040000,
	THOL3			= 0x30000000,
	THOL2			= 0x20000000,
	THOL1			= 0x10000000,
	THOL0			= 0x00000000,

	WND			= 0x00080000,
	TABRT			= 0x00040000,
	FIFO			= 0x00020000,
	LINK			= 0x00010000,
	ColCountMask		= 0x0000ffff,

	IPON			= 0x20000000,
	TCPON			= 0x10000000,
	UDPON			= 0x08000000,
	Wakup			= 0x00400000,
	Magic			= 0x00200000,
	Pause			= 0x00100000,
	DEFbit			= 0x00200000,
	BCAST			= 0x000c0000,
	MCAST			= 0x00080000,
	UCAST			= 0x00040000,

	TAGON			= 0x80000000,
	RxDescCountMask		= 0x7f000000,
	ABORT			= 0x00800000,
	SHORT			= 0x00400000,
	LIMIT			= 0x00200000,
	MIIER			= 0x00100000,
	OVRUN			= 0x00080000,
	NIBON			= 0x00040000,
	COLON			= 0x00020000,
	CRCOK			= 0x00010000,
	RxSizeMask		= 0x0000ffff
};

enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,
	EECLK	= 0x00000002,
	EEDO	= 0x00000008,
	EEDI	= 0x00000004,
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100
};

enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};

struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;
	u32 features;
	u32 negotiated_lpa;
	enum {
		LNK_OFF,
		LNK_ON,
		LNK_AUTONEG,
	} link_status;
};

struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8 type;
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};

static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

static const int multicast_filter_limit = 32;

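/*
 * PHY access helpers.
 *
 * MII/GMII management frames are driven through the GMIIControl register:
 * the register number, PHY address and (for writes) the data word are
 * packed into a single 32-bit command using the EhnMII* shifts above,
 * EhnMIIreq starts the transaction and EhnMIInotDone is polled until the
 * chip reports completion.
 */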
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		pr_err("PHY command failed !\n");
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}

static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}

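/*
 * Some PHY status bits (e.g. the BMSR link status) are latched: the first
 * read returns the latched value and the second the current one, hence
 * the back-to-back reads in mdio_read_latched() above. The EEPROM helper
 * below polls EEREQ until the serial ROM transaction completes and
 * returns 0xffff on timeout, or 0 when no EEPROM is detected.
 */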
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}

static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}

static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

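/*
 * Rx buffer management: sis190_alloc_rx_skb() allocates an skb of
 * rx_buf_sz bytes, maps it for DMA and hands the descriptor to the chip
 * (OWNbit set); sis190_rx_fill() walks the [start, end) window of the
 * ring and refills every slot that currently has no skb attached.
 */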
static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (unlikely(!skb))
		goto skb_alloc_failed;
	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping))
		goto out;
	sis190_map_to_asic(desc, mapping, rx_buf_sz);

	return skb;

out:
	dev_kfree_skb_any(skb);
skb_alloc_failed:
	sis190_make_unusable_by_asic(desc);
	return NULL;
}

static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		unsigned int i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);

		if (!tp->Rx_skbuff[i])
			break;
	}
	return cur - start;
}

static bool sis190_try_rx_copy(struct sis190_private *tp,
			       struct sk_buff **sk_buff, int pkt_size,
			       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (!skb)
		goto out;

	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
				    PCI_DMA_FROMDEVICE);
	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}

static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}

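/*
 * Rx path. The handler walks the ring from cur_rx until it meets a
 * descriptor still owned by the hardware, drops errored or oversized
 * frames, copies small packets (below rx_copybreak) into a fresh skb so
 * the original buffer can be recycled, and finally refills the ring.
 */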
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				netif_info(tp, intr, dev,
					   "(frag) status = %08x\n", status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;

	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

	return count;
}

static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask	(WND | TABRT | FIFO | LINK)

	if (likely(!(status & TxErrMask)))
		return 0;

	if (status & WND)
		stats->tx_window_errors++;
	if (status & TABRT)
		stats->tx_aborted_errors++;
	if (status & FIFO)
		stats->tx_fifo_errors++;
	if (status & LINK)
		stats->tx_carrier_errors++;

	stats->tx_errors++;

	return -1;
}

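/*
 * Tx completion. Descriptors between dirty_tx and cur_tx that the chip
 * has released (OWNbit clear) are unmapped and their skbs freed; the
 * queue is woken again if it had been stopped because the ring was full.
 */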
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

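/*
 * The interrupt handler does all of the Rx and Tx completion work: the
 * status register is read once, acknowledged, and then dispatched to the
 * link change, Rx and Tx (queue 0) handlers.
 */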
static irqreturn_t sis190_irq(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	if (status & LinkChange) {
		netif_info(tp, intr, dev, "link change\n");
		del_timer(&tp->timer);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	const int irq = tp->pci_dev->irq;

	disable_irq(irq);
	sis190_irq(irq, dev);
	enable_irq(irq);
}
#endif

static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr =
				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}

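/*
 * Bring the adapter up: reset the chip, program the descriptor ring base
 * addresses, clear the WoL and hash state, restore the Rx filter and
 * finally unmask interrupts and enable the Tx/Rx DMA engines.
 */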
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}

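/*
 * Deferred link management. sis190_phy_task() runs from the shared
 * workqueue (scheduled by the LinkChange interrupt or the periodic
 * timer), reads the PHY status under rtnl_lock and, when the link comes
 * up, translates the negotiated LPA bits into a StationControl value.
 */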
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset has not completed yet: try again later. */
		mod_timer(&tp->timer, jiffies + HZ/10);
		goto out_unlock;
	}

	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
		netif_carrier_off(dev);
		netif_warn(tp, link, dev, "auto-negotiating...\n");
		tp->link_status = LNK_AUTONEG;
	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
		/* Link is up: map the negotiated abilities to a
		 * StationControl value. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		netif_info(tp, link, dev, "mii ext = %04x\n", val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
			   val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* Both ends support next pages: check the
			 * 1000BASE-T control/status registers. */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			/* Set Tx delay in RGMII mode (BCM5461). */
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		tp->negotiated_lpa = p->val;

		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
		netif_carrier_on(dev);
		tp->link_status = LNK_ON;
	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
		tp->link_status = LNK_OFF;
	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);

out_unlock:
	rtnl_unlock();
}

static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* The chip ignores the low bits of RxDesc->size: keep the buffer
	 * size 8-byte aligned. */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}

static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * The descriptor rings require strict alignment;
	 * pci_alloc_consistent() guarantees it.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	goto out;
}

static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(tp->pci_dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(pdev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

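/*
 * Transmit path. Frames shorter than ETH_ZLEN are padded, the payload is
 * DMA-mapped and described by a single descriptor, and ownership is
 * passed to the chip with a write to TxControl; the queue is stopped as
 * soon as the ring becomes full.
 */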
static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		netif_err(tp, tx_err, dev,
			  "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
		netif_err(tp, tx_err, dev,
			  "PCI mapping failed, dropping packet\n");
		return NETDEV_TX_BUSY;
	}

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
		/* Half duplex: enable collision, carrier sense and
		 * backoff handling. */
		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
			desc->status |= cpu_to_le32(EXTEN | BSTEN);
	}

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}

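/*
 * Pick the default transceiver: prefer the first known PHY that reports
 * link, otherwise fall back to a HOME PHY, then a LAN PHY, then simply
 * the first one discovered. All other PHYs are isolated and the chosen
 * one is un-isolated before its status is returned.
 */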
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						       struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		if (netif_msg_probe(tp))
			pr_info("%s: Using transceiver at address %d as default\n",
				pci_name(tp->pci_dev), mii_if->phy_id);
	}

	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}

static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
		if (netif_msg_probe(tp))
			pr_info("%s: %s transceiver at address %d\n",
				pci_name(tp->pci_dev), p->name, phy_id);
	} else {
		phy->type = UNKNOWN;
		if (netif_msg_probe(tp))
			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
				pci_name(tp->pci_dev),
				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
	}
}

static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}

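/*
 * Probe the MII bus: every address whose BMSR reads as neither 0x0000
 * nor 0xffff is assumed to host a PHY, identified against mii_chip_table
 * and added to tp->first_phy before a default transceiver is selected.
 */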
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		if (netif_msg_probe(tp))
			pr_info("%s: No MII transceivers found!\n",
				pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}

static void sis190_mii_remove(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_free_phy(&tp->first_phy);
}

static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}

static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp))
			pr_err("%s: region #0 is no MMIO resource\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		if (netif_msg_probe(tp))
			pr_err("%s: invalid PCI region size(s)\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: could not request regions\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: DMA configuration failed\n",
			       pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			pr_err("%s: cannot remap MMIO, aborting\n",
			       pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->link_status = LNK_OFF;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}

static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already done. */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
		   SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}

static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}

static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* Check whether there is a sane EEPROM. */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		if (netif_msg_probe(tp))
			pr_info("%s: Error EEPROM read %x\n",
				pci_name(pdev), sig);
		return -EIO;
	}

	/* Get the MAC address from the EEPROM. */
	for (i = 0; i < ETH_ALEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}

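/*
 * Fallback MAC address source: when no usable EEPROM is found, the
 * address appears to be read from the APC registers behind the SiS ISA
 * bridge via the index/data port pair 0x78/0x79, which becomes
 * accessible once bit 1 of the bridge's PCI config register 0x48 is
 * cleared (and is restored afterwards).
 */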
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	unsigned int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from APC\n", pci_name(pdev));

	for (i = 0; i < ARRAY_SIZE(ids); i++) {
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
		if (isa_bridge)
			break;
	}

	if (!isa_bridge) {
		if (netif_msg_probe(tp))
			pr_info("%s: Can not find ISA bridge\n",
				pci_name(pdev));
		return -EIO;
	}

	/* Enable ports 78h and 79h to access the APC registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	for (i = 0; i < ETH_ALEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the original value to the ISA bridge. */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}

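/*
 * Program the unicast filter: the MAC address bytes are written one at a
 * time to RxMacAddr while the address filtering bits of RxMacControl are
 * temporarily cleared, then the previous control value is restored.
 */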
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/* Disable packet filtering before setting the filter. */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < ETH_ALEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}

static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
					 struct net_device *dev)
{
	int rc;

	rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
	if (rc < 0) {
		u8 reg;

		pci_read_config_byte(pdev, 0x73, &reg);

		if (reg & 0x00000001)
			rc = sis190_get_mac_addr_from_apc(pdev, dev);
	}
	return rc;
}

static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	/* Advertise 10/100 full and half duplex, keeping the selector
	 * field of MII_ADVERTISE unchanged. */
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	/* Advertise 1000 Mbps full duplex. */
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	/* Enable and restart auto-negotiation. */
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}

static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct sis190_private *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev),
		sizeof(info->bus_info));
}

static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};

static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}

static int sis190_mac_addr(struct net_device *dev, void *p)
{
	int rc;

	rc = eth_mac_addr(dev, p);
	if (!rc)
		sis190_init_rxfilter(dev);
	return rc;
}

static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_rx_mode	= sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= sis190_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};

static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		if (netif_msg_drv(&debug))
			pr_info(SIS190_DRIVER_NAME " loaded\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    pci_name(pdev),
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, pdev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
	}

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}

static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	sis190_mii_remove(dev);
	cancel_work_sync(&tp->phy_task);
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);