1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/netdevice.h>
27#include <linux/rtnetlink.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/pci.h>
31#include <linux/mii.h>
32#include <linux/delay.h>
33#include <linux/crc32.h>
34#include <linux/dma-mapping.h>
35#include <linux/slab.h>
36#include <asm/irq.h>
37
/* MII management bus limits: 32 possible addresses, 5-bit register space. */
#define PHY_MAX_ADDR 32
#define PHY_ID_ANY 0x1f
#define MII_REG_ANY 0x1f

#define DRV_VERSION "1.4"
#define DRV_NAME "sis190"
#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

/* Non-NAPI receive path: deliver straight to the stack, ignore any quota. */
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count

#define MAC_ADDR_LEN 6

/* Ring geometry.  RX_BUF_MASK keeps Rx buffer sizes 8-byte aligned. */
#define NUM_TX_DESC 64
#define NUM_RX_DESC 64
#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE 1536
#define RX_BUF_MASK 0xfff8

#define SIS190_REGS_SIZE 0x80
#define SIS190_TX_TIMEOUT (6*HZ)
#define SIS190_PHY_TIMEOUT (10*HZ)
#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
 NETIF_MSG_IFDOWN)

/* Bit layout of commands issued through the GMIIControl register. */
#define EhnMIIread 0x0000
#define EhnMIIwrite 0x0020
#define EhnMIIdataShift 16
#define EhnMIIpmdShift 6
#define EhnMIIregShift 11
#define EhnMIIreq 0x0010
#define EhnMIInotDone 0x0010

/* MMIO accessors; a local 'ioaddr' must be in scope at every call site. */
#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
#define SIS_R8(reg) readb (ioaddr + (reg))
#define SIS_R16(reg) readw (ioaddr + (reg))
#define SIS_R32(reg) readl (ioaddr + (reg))

/* Flush posted PCI writes by reading back a harmless register. */
#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
83
/* MMIO register offsets into the SIS190_REGS_SIZE (0x80) window. */
enum sis190_registers {
	TxControl = 0x00,
	TxDescStartAddr = 0x04,
	rsv0 = 0x08,		/* reserved */
	TxSts = 0x0c,
	RxControl = 0x10,
	RxDescStartAddr = 0x14,
	rsv1 = 0x18,		/* reserved */
	RxSts = 0x1c,
	IntrStatus = 0x20,
	IntrMask = 0x24,
	IntrControl = 0x28,
	IntrTimer = 0x2c,
	PMControl = 0x30,
	rsv2 = 0x34,		/* reserved */
	ROMControl = 0x38,
	ROMInterface = 0x3c,	/* EEPROM access, see sis190_read_eeprom() */
	StationControl = 0x40,	/* speed/duplex control, see sis190_phy_task() */
	GMIIControl = 0x44,	/* MII management, see __mdio_cmd() */
	GIoCR = 0x48,
	GIoCtrl = 0x4c,
	TxMacControl = 0x50,
	TxLimit = 0x54,
	RGDelay = 0x58,		/* RGMII delay tuning */
	rsv3 = 0x5c,		/* reserved */
	RxMacControl = 0x60,
	RxMacAddr = 0x62,
	RxHashTable = 0x68,	/* 64-bit multicast hash (0x68 + 0x6c) */

	RxWolCtrl = 0x70,
	RxWolData = 0x74,
	RxMPSControl = 0x78,
	rsv4 = 0x7c,		/* reserved */
};
118
enum sis190_register_content {

	/* IntrStatus / IntrMask bits. */
	SoftInt = 0x40000000,
	Timeup = 0x20000000,
	PauseFrame = 0x00080000,
	MagicPacket = 0x00040000,
	WakeupFrame = 0x00020000,
	LinkChange = 0x00010000,
	RxQEmpty = 0x00000080,
	RxQInt = 0x00000040,
	TxQ1Empty = 0x00000020,
	TxQ1Int = 0x00000010,
	TxQ0Empty = 0x00000008,
	TxQ0Int = 0x00000004,
	RxHalt = 0x00000002,
	TxHalt = 0x00000001,

	/* TxControl / RxControl command bits. */
	CmdReset = 0x10,
	CmdRxEnb = 0x08,
	CmdTxEnb = 0x01,
	RxBufEmpty = 0x01,

	/* EEPROM lock/unlock values (9346-style). */
	Cfg9346_Lock = 0x00,
	Cfg9346_Unlock = 0xc0,

	/* RxMacControl accept-filter bits, see sis190_set_rx_mode(). */
	AcceptErr = 0x20,
	AcceptRunt = 0x10,
	AcceptBroadcast = 0x0800,
	AcceptMulticast = 0x0400,
	AcceptMyPhys = 0x0200,
	AcceptAllPhys = 0x0100,

	/* Rx configuration field shifts. */
	RxCfgFIFOShift = 13,
	RxCfgDMAShift = 8,

	/* Tx configuration field shifts. */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,

	/* Link state flags. */
	LinkStatus = 0x02,
	FullDup = 0x01,

	/* TBI (fiber) link status. */
	TBILinkOK = 0x02000000,
};
168
/*
 * Hardware descriptors: four little-endian dwords each; Tx and Rx share
 * the same shape.  The NIC reads/writes these via DMA.
 */
struct TxDesc {
	__le32 PSize;	/* packet size (written by sis190_start_xmit) */
	__le32 status;	/* OWNbit + command bits, see enum _DescStatusBit */
	__le32 addr;	/* DMA address of the packet buffer */
	__le32 size;	/* buffer size; last slot carries RingEnd */
};

struct RxDesc {
	__le32 PSize;	/* receive status + packet size on completion */
	__le32 status;	/* OWNbit + INTbit control bits */
	__le32 addr;	/* DMA address of the receive buffer */
	__le32 size;	/* buffer size; last slot carries RingEnd */
};
182
enum _DescStatusBit {

	/* Bits common to Tx and Rx descriptor status words. */
	OWNbit = 0x80000000,	/* descriptor belongs to the NIC */
	INTbit = 0x40000000,	/* interrupt on completion */
	CRCbit = 0x00020000,	/* append CRC (Tx) */
	PADbit = 0x00010000,	/* pad short frames (Tx) */

	/* Set in the size field of the last descriptor of a ring. */
	RingEnd = 0x80000000,

	/* Tx command bits. */
	LSEN = 0x08000000,
	IPCS = 0x04000000,
	TCPCS = 0x02000000,
	UDPCS = 0x01000000,
	BSTEN = 0x00800000,
	EXTEN = 0x00400000,
	DEFEN = 0x00200000,
	BKFEN = 0x00100000,
	CRSEN = 0x00080000,
	COLEN = 0x00040000,
	THOL3 = 0x30000000,
	THOL2 = 0x20000000,
	THOL1 = 0x10000000,
	THOL0 = 0x00000000,

	/* Tx completion status bits, see sis190_tx_pkt_err(). */
	WND = 0x00080000,
	TABRT = 0x00040000,
	FIFO = 0x00020000,
	LINK = 0x00010000,
	ColCountMask = 0x0000ffff,

	/* Rx completion status bits, see sis190_rx_pkt_err(). */
	IPON = 0x20000000,
	TCPON = 0x10000000,
	UDPON = 0x08000000,
	Wakup = 0x00400000,
	Magic = 0x00200000,
	Pause = 0x00100000,
	DEFbit = 0x00200000,
	BCAST = 0x000c0000,
	MCAST = 0x00080000,
	UCAST = 0x00040000,

	TAGON = 0x80000000,
	RxDescCountMask = 0x7f000000,
	ABORT = 0x00800000,
	SHORT = 0x00400000,
	LIMIT = 0x00200000,
	MIIER = 0x00100000,
	OVRUN = 0x00080000,
	NIBON = 0x00040000,
	COLON = 0x00020000,
	CRCOK = 0x00010000,
	RxSizeMask = 0x0000ffff
};
241
/* ROMInterface register bits used for EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS = 0x00000001,	/* chip select */
	EECLK = 0x00000002,	/* clock */
	EEDO = 0x00000008,	/* data out */
	EEDI = 0x00000004,	/* data in */
	EEREQ = 0x00000080,	/* request; cleared by hw when done */
	EEROP = 0x00000200,	/* read operation */
	EEWOP = 0x00000100	/* write operation */
};

/* Word addresses inside the configuration EEPROM. */
enum sis190_eeprom_address {
	EEPROMSignature = 0x00,
	EEPROMCLK = 0x01,
	EEPROMInfo = 0x02,
	EEPROMMACAddr = 0x03
};

/* Board/PHY quirk flags accumulated in sis190_private.features. */
enum sis190_feature {
	F_HAS_RGMII = 1,
	F_PHY_88E1111 = 2,
	F_PHY_BCM5461 = 4
};
265
/* Per-adapter state, stored in the net_device private area. */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped register window */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* serializes register update paths */
	u32 rx_buf_sz;			/* Rx buffer size, 8-byte aligned */
	u32 cur_rx;			/* next Rx slot to examine */
	u32 cur_tx;			/* next Tx slot to fill */
	u32 dirty_rx;			/* first Rx slot awaiting refill */
	u32 dirty_tx;			/* first Tx slot awaiting reclaim */
	dma_addr_t rx_dma;		/* DMA handle of RxDescRing */
	dma_addr_t tx_dma;		/* DMA handle of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* skb per Rx slot */
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* skb per Tx slot */
	struct work_struct phy_task;	/* deferred link management */
	struct timer_list timer;	/* periodic PHY poll */
	u32 msg_enable;			/* netif message level mask */
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* all PHYs found on the MII bus */
	u32 features;			/* enum sis190_feature flags */
	u32 negotiated_lpa;		/* LPA bits of the active link */
	enum {
		LNK_OFF,
		LNK_ON,
		LNK_AUTONEG,
	} link_status;
};

/* One transceiver discovered during the MII probe. */
struct sis190_phy {
	struct list_head list;
	int phy_id;	/* MII bus address */
	u16 id[2];	/* PHYSID1 / PHYSID2 */
	u16 status;	/* BMSR sampled at discovery */
	u8 type;	/* enum sis190_phy_type */
};

enum sis190_phy_type {
	UNKNOWN = 0x00,
	HOME = 0x01,
	LAN = 0x02,
	MIX = 0x03	/* classified as LAN or HOME by advertised speeds */
};
310
/* Known PHYs, matched on (PHYSID1, PHYSID2 & 0xfff0); see sis190_init_phy(). */
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

/* Indexed by the driver_data value in sis190_pci_tbl below. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

/* Frames below this size are copied out of the DMA buffer on receive. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;	/* module parameter: netif message level mask */
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* The set of interrupt sources this driver services. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/* Above this many multicast groups, fall back to accept-all-multicast. */
static const int multicast_filter_limit = 32;
365
/*
 * Issue one command on the GMII management interface and busy-wait
 * (up to ~100 ms) for the controller to clear the "not done" bit.
 */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	/* i only reaches 100 when the poll loop timed out. */
	if (i > 99)
		pr_err("PHY command failed !\n");
}
383
384static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
385{
386 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
387 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
388 (((u32) val) << EhnMIIdataShift));
389}
390
391static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
392{
393 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
394 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
395
396 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
397}
398
399static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
400{
401 struct sis190_private *tp = netdev_priv(dev);
402
403 mdio_write(tp->mmio_addr, phy_id, reg, val);
404}
405
406static int __mdio_read(struct net_device *dev, int phy_id, int reg)
407{
408 struct sis190_private *tp = netdev_priv(dev);
409
410 return mdio_read(tp->mmio_addr, phy_id, reg);
411}
412
413static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
414{
415 mdio_read(ioaddr, phy_id, reg);
416 return mdio_read(ioaddr, phy_id, reg);
417}
418
/*
 * Read one 16-bit word from the EEPROM via the ROM interface register.
 * Returns 0 when the presence bit is clear (presumably "no EEPROM
 * attached" — confirm against the datasheet), or 0xffff if the
 * controller never drops EEREQ within ~200 ms.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	/* Poll until the chip clears EEREQ, then latch the high word. */
	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
439
/* Disable all interrupt sources and acknowledge anything pending. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	/* Read back so the posted writes reach the chip before we return. */
	SIS_PCI_COMMIT();
}
446
/*
 * Quiesce the chip: write the Tx/Rx control base values without the
 * enable bits (compare sis190_hw_start(), which ORs in CmdTxEnb etc.),
 * then mask and ack all interrupts.
 */
static void sis190_asic_down(void __iomem *ioaddr)
{
	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
456
/* Flag @desc as the final ring entry so the hardware wraps to the start. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
461
/*
 * Return an Rx descriptor (buffer already mapped) to the NIC.  All
 * fields must be visible before ownership transfers, hence the wmb()
 * before setting OWNbit.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	/* Preserve the end-of-ring marker across the refresh. */
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
471
/* Publish a fresh buffer's DMA address, then hand the slot to the NIC. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
478
/*
 * Detach a descriptor from the hardware: clear OWNbit (after a barrier)
 * so the NIC never touches it, and poison the address so accidental use
 * is easy to spot.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
487
/*
 * Allocate and DMA-map one receive buffer and attach it to @desc.
 * On failure the descriptor is neutralized and NULL is returned; the
 * caller (sis190_rx_fill) treats NULL as "stop filling".
 */
static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (unlikely(!skb))
		goto skb_alloc_failed;
	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping))
		goto out;
	sis190_map_to_asic(desc, mapping, rx_buf_sz);

	return skb;

out:
	dev_kfree_skb_any(skb);
skb_alloc_failed:
	sis190_make_unusable_by_asic(desc);
	return NULL;
}
512
513static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
514 u32 start, u32 end)
515{
516 u32 cur;
517
518 for (cur = start; cur < end; cur++) {
519 unsigned int i = cur % NUM_RX_DESC;
520
521 if (tp->Rx_skbuff[i])
522 continue;
523
524 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
525
526 if (!tp->Rx_skbuff[i])
527 break;
528 }
529 return cur - start;
530}
531
/*
 * Copybreak: for frames below rx_copybreak, copy the payload into a
 * small fresh skb so the large mapped buffer can stay on the ring.
 * Returns true when *sk_buff now points at the copy.
 */
static bool sis190_try_rx_copy(struct sis190_private *tp,
			       struct sk_buff **sk_buff, int pkt_size,
			       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (!skb)
		goto out;

	/* Make the device's DMA writes visible to the CPU before copying. */
	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
				    PCI_DMA_FROMDEVICE);
	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}
554
555static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
556{
557#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
558
559 if ((status & CRCOK) && !(status & ErrMask))
560 return 0;
561
562 if (!(status & CRCOK))
563 stats->rx_crc_errors++;
564 else if (status & OVRUN)
565 stats->rx_over_errors++;
566 else if (status & (SHORT | LIMIT))
567 stats->rx_length_errors++;
568 else if (status & (MIIER | NIBON | COLON))
569 stats->rx_frame_errors++;
570
571 stats->rx_errors++;
572 return -1;
573}
574
/*
 * Service the Rx ring: deliver completed packets, recycle or replace
 * their buffers, then refill the slots emptied along the way.
 * Returns the number of descriptors processed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* OWNbit set: the NIC still owns this slot — stop here. */
		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			/* Reported length includes the 4-byte CRC. */
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				netif_info(tp, intr, dev,
					   "(frag) status = %08x\n", status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Data was copied out: keep the mapping and
				 * hand the original buffer back to the NIC. */
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Pass the mapped buffer itself upstream;
				 * the slot is refilled below. */
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish the slots whose buffers went up the stack. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;

	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

	return count;
}
650
651static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
652 struct TxDesc *desc)
653{
654 unsigned int len;
655
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
657
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
659
660 memset(desc, 0x00, sizeof(*desc));
661}
662
663static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
664{
665#define TxErrMask (WND | TABRT | FIFO | LINK)
666
667 if (!unlikely(status & TxErrMask))
668 return 0;
669
670 if (status & WND)
671 stats->tx_window_errors++;
672 if (status & TABRT)
673 stats->tx_aborted_errors++;
674 if (status & FIFO)
675 stats->tx_fifo_errors++;
676 if (status & LINK)
677 stats->tx_carrier_errors++;
678
679 stats->tx_errors++;
680
681 return -1;
682}
683
/*
 * Reclaim completed Tx descriptors: account statistics, unmap and free
 * the transmitted skbs, and wake the queue if it was stopped on a full
 * ring.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() in sis190_start_xmit(). */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		/* Still owned by the NIC: stop reclaiming here. */
		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			/* NOTE(review): assumes the hardware count field
			 * includes the successful attempt (hence the -1) —
			 * confirm against the datasheet. */
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
728
729
730
731
732
/*
 * Interrupt handler.  A status of 0 means "not ours" (shared line);
 * 0xffffffff typically indicates a surprise-removed device — both are
 * ignored.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		/* Late interrupt while the interface is down: quiesce. */
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Acknowledge everything we are about to service. */
	SIS_W32(IntrStatus, status);

	if (status & LinkChange) {
		netif_info(tp, intr, dev, "link change\n");
		/* Let the deferred PHY task re-evaluate the link. */
		del_timer(&tp->timer);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
771
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netconsole and friends: run the normal
 * handler with the device IRQ disabled.
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
783
/* Unmap and free one Rx buffer, then neutralize its descriptor. */
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}
795
796static void sis190_rx_clear(struct sis190_private *tp)
797{
798 unsigned int i;
799
800 for (i = 0; i < NUM_RX_DESC; i++) {
801 if (!tp->Rx_skbuff[i])
802 continue;
803 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
804 }
805}
806
807static void sis190_init_ring_indexes(struct sis190_private *tp)
808{
809 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
810}
811
812static int sis190_init_ring(struct net_device *dev)
813{
814 struct sis190_private *tp = netdev_priv(dev);
815
816 sis190_init_ring_indexes(tp);
817
818 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
819 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
820
821 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
822 goto err_rx_clear;
823
824 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
825
826 return 0;
827
828err_rx_clear:
829 sis190_rx_clear(tp);
830 return -ENOMEM;
831}
832
/*
 * Program the receive filter from dev->flags and the multicast list:
 * promiscuous, all-multicast (also used when the group count exceeds
 * multicast_filter_limit), or a 64-bit hash filter.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Accept everything. */
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many groups to hash: take all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Hash each group on 6 bits of its CRC. */
			int bit_nr =
				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
872
/* Pulse the soft-reset bit, then bring the DMA engines down cleanly. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	/* Make sure the reset write lands before clearing it. */
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}
880
/*
 * Full hardware (re)initialization: soft reset, program the ring base
 * addresses, clear filter and wake-on-LAN state, apply the Rx mode,
 * unmask interrupts and enable both DMA engines.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	/* Clear any stale interrupt state while still masked. */
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* upper half of the 64-bit hash table */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	SIS_W32(IntrMask, sis190_intr_mask);

	/* Base control values plus the enable bits (cf. sis190_asic_down). */
	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
913
/*
 * Deferred PHY/link management: tracks auto-negotiation, programs the
 * MAC's StationControl for the negotiated speed/duplex, and updates the
 * carrier state.  Re-arms tp->timer for periodic polling.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset still in progress: check again in 100 ms. */
		mod_timer(&tp->timer, jiffies + HZ/10);
		goto out_unlock;
	}

	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
		netif_carrier_off(dev);
		netif_warn(tp, link, dev, "auto-negotiating...\n");
		tp->link_status = LNK_AUTONEG;
	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
		/* Link came up: map the resolved LPA bits to the matching
		 * StationControl value.  The last entry (val == 0) is the
		 * catch-all. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		netif_info(tp, link, dev, "mii ext = %04x\n", val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
			   val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* Gigabit pages exchanged: intersect our 1000BASE-T
			 * advertisement with the partner's acknowledgment
			 * (STAT1000 bits sit two positions above CTRL1000). */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			/* Fall back to the common 10/100 resolution. */
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		/* Merge the new speed/duplex bits into StationControl while
		 * preserving the unrelated bits. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			/* BCM5461-specific RGMII setup; magic register
			 * values whose origin is not documented here. */
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		tp->negotiated_lpa = p->val;

		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
		netif_carrier_on(dev);
		tp->link_status = LNK_ON;
	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
		tp->link_status = LNK_OFF;
	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);

out_unlock:
	rtnl_unlock();
}
1021
1022static void sis190_phy_timer(unsigned long __opaque)
1023{
1024 struct net_device *dev = (struct net_device *)__opaque;
1025 struct sis190_private *tp = netdev_priv(dev);
1026
1027 if (likely(netif_running(dev)))
1028 schedule_work(&tp->phy_task);
1029}
1030
/* Stop the PHY poll timer, waiting for a running handler to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
1037
/* Arm the periodic PHY poll timer; fires after SIS190_PHY_TIMEOUT. */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	/* Fields must be fully set up before add_timer() publishes it. */
	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1049
1050static void sis190_set_rxbufsize(struct sis190_private *tp,
1051 struct net_device *dev)
1052{
1053 unsigned int mtu = dev->mtu;
1054
1055 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1056
1057 if (tp->rx_buf_sz & 0x07) {
1058 tp->rx_buf_sz += 8;
1059 tp->rx_buf_sz &= RX_BUF_MASK;
1060 }
1061}
1062
/*
 * net_device open: allocate both coherent descriptor rings, populate
 * the Rx ring, start the PHY timer, grab the (shared) IRQ and start the
 * hardware.  Failures unwind in reverse order through the goto chain.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

	/* Error unwinding: deepest failure first. */
err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}
1108
1109static void sis190_tx_clear(struct sis190_private *tp)
1110{
1111 unsigned int i;
1112
1113 for (i = 0; i < NUM_TX_DESC; i++) {
1114 struct sk_buff *skb = tp->Tx_skbuff[i];
1115
1116 if (!skb)
1117 continue;
1118
1119 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1120 tp->Tx_skbuff[i] = NULL;
1121 dev_kfree_skb(skb);
1122
1123 tp->dev->stats.tx_dropped++;
1124 }
1125 tp->cur_tx = tp->dirty_tx = 0;
1126}
1127
/*
 * Shutdown path for sis190_close(): kill the PHY timer, halt the queue,
 * then repeatedly quiesce the chip until the interrupt mask reads back
 * zero — presumably guarding against a concurrent path (PHY work / late
 * irq) re-arming it; confirm against driver history.  Finally release
 * every buffer on both rings.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait out any handler still running on another CPU. */
		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1157
/*
 * net_device stop: bring the hardware down, release the IRQ, then free
 * both descriptor rings (order matters — no irq may fire once the rings
 * are gone).
 */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1175
/*
 * Queue one packet on the Tx ring and kick the transmitter.  Frames
 * shorter than ETH_ZLEN are padded before DMA mapping.
 */
static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			/* skb_padto frees the skb on failure. */
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		netif_err(tp, tx_err, dev,
			  "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
		/* NOTE(review): NETDEV_TX_BUSY without stopping the queue
		 * makes the core requeue and retry immediately — confirm
		 * whether dropping the packet would be preferable. */
		netif_err(tp, tx_err, dev,
			  "PCI mapping failed, dropping packet");
		return NETDEV_TX_BUSY;
	}

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* All descriptor fields must be visible before OWNbit is set. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
		/* Half duplex: enable the collision-handling Tx features. */
		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
			desc->status |= cpu_to_le32(EXTEN | BSTEN);
	}

	tp->cur_tx++;

	/* Pairs with the smp_rmb() in sis190_tx_interrupt(). */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		/* Ring just filled: stop, then re-check in case the Tx
		 * interrupt freed slots in the meantime. */
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1247
1248static void sis190_free_phy(struct list_head *first_phy)
1249{
1250 struct sis190_phy *cur, *next;
1251
1252 list_for_each_entry_safe(cur, next, first_phy, list) {
1253 kfree(cur);
1254 }
1255}
1256
1257
1258
1259
1260
1261
1262
1263
1264
/*
 * Select the default transceiver: prefer a known-type PHY reporting
 * link, otherwise fall back to HOME, then LAN, then simply the first
 * one discovered.  Every non-selected PHY is isolated from the bus.
 * Returns the selected PHY's (latched) BMSR value.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		/* First identified PHY with link wins. */
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Isolate the losers; remember fallbacks by type. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						       struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		if (netif_msg_probe(tp))
			pr_info("%s: Using transceiver at address %d as default\n",
				pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* Take the chosen PHY out of isolation. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1319
1320static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1321 struct sis190_phy *phy, unsigned int phy_id,
1322 u16 mii_status)
1323{
1324 void __iomem *ioaddr = tp->mmio_addr;
1325 struct mii_chip_info *p;
1326
1327 INIT_LIST_HEAD(&phy->list);
1328 phy->status = mii_status;
1329 phy->phy_id = phy_id;
1330
1331 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1332 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1333
1334 for (p = mii_chip_table; p->type; p++) {
1335 if ((p->id[0] == phy->id[0]) &&
1336 (p->id[1] == (phy->id[1] & 0xfff0))) {
1337 break;
1338 }
1339 }
1340
1341 if (p->id[1]) {
1342 phy->type = (p->type == MIX) ?
1343 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1344 LAN : HOME) : p->type;
1345 tp->features |= p->feature;
1346 if (netif_msg_probe(tp))
1347 pr_info("%s: %s transceiver at address %d\n",
1348 pci_name(tp->pci_dev), p->name, phy_id);
1349 } else {
1350 phy->type = UNKNOWN;
1351 if (netif_msg_probe(tp))
1352 pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1353 pci_name(tp->pci_dev),
1354 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1355 }
1356}
1357
1358static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1359{
1360 if (tp->features & F_PHY_88E1111) {
1361 void __iomem *ioaddr = tp->mmio_addr;
1362 int phy_id = tp->mii_if.phy_id;
1363 u16 reg[2][2] = {
1364 { 0x808b, 0x0ce1 },
1365 { 0x808f, 0x0c60 }
1366 }, *p;
1367
1368 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1369
1370 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1371 udelay(200);
1372 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1373 udelay(200);
1374 }
1375}
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385static int __devinit sis190_mii_probe(struct net_device *dev)
1386{
1387 struct sis190_private *tp = netdev_priv(dev);
1388 struct mii_if_info *mii_if = &tp->mii_if;
1389 void __iomem *ioaddr = tp->mmio_addr;
1390 int phy_id;
1391 int rc = 0;
1392
1393 INIT_LIST_HEAD(&tp->first_phy);
1394
1395 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1396 struct sis190_phy *phy;
1397 u16 status;
1398
1399 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1400
1401
1402 if (status == 0xffff || status == 0x0000)
1403 continue;
1404
1405 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1406 if (!phy) {
1407 sis190_free_phy(&tp->first_phy);
1408 rc = -ENOMEM;
1409 goto out;
1410 }
1411
1412 sis190_init_phy(dev, tp, phy, phy_id, status);
1413
1414 list_add(&tp->first_phy, &phy->list);
1415 }
1416
1417 if (list_empty(&tp->first_phy)) {
1418 if (netif_msg_probe(tp))
1419 pr_info("%s: No MII transceivers found!\n",
1420 pci_name(tp->pci_dev));
1421 rc = -EIO;
1422 goto out;
1423 }
1424
1425
1426 sis190_default_phy(dev);
1427
1428 sis190_mii_probe_88e1111_fixup(tp);
1429
1430 mii_if->dev = dev;
1431 mii_if->mdio_read = __mdio_read;
1432 mii_if->mdio_write = __mdio_write;
1433 mii_if->phy_id_mask = PHY_ID_ANY;
1434 mii_if->reg_num_mask = MII_REG_ANY;
1435out:
1436 return rc;
1437}
1438
1439static void sis190_mii_remove(struct net_device *dev)
1440{
1441 struct sis190_private *tp = netdev_priv(dev);
1442
1443 sis190_free_phy(&tp->first_phy);
1444}
1445
1446static void sis190_release_board(struct pci_dev *pdev)
1447{
1448 struct net_device *dev = pci_get_drvdata(pdev);
1449 struct sis190_private *tp = netdev_priv(dev);
1450
1451 iounmap(tp->mmio_addr);
1452 pci_release_regions(pdev);
1453 pci_disable_device(pdev);
1454 free_netdev(dev);
1455}
1456
/*
 * Allocate the net_device, enable and map the PCI device and leave the
 * chip quiesced (interrupts masked, soft reset issued).
 *
 * Returns the net_device on success or an ERR_PTR() on failure; on
 * failure everything acquired here is unwound via the goto ladder.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			pr_err("unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be an MMIO region large enough for the register
	 * window (SIS190_REGS_SIZE bytes). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp))
			pr_err("%s: region #0 is no MMIO resource\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		if (netif_msg_probe(tp))
			pr_err("%s: invalid PCI region size(s)\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: could not request regions\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* The chip only does 32-bit DMA. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: DMA configuration failed\n",
			       pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			pr_err("%s: cannot remap MMIO, aborting\n",
			       pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->link_status = LNK_OFF;

	/* Quiesce the chip before anyone can touch it. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1547
/*
 * Watchdog handler: the transmitter stalled for more than
 * SIS190_TX_TIMEOUT.  Stop TX, drop all queued descriptors and restart
 * the hardware.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if it is not already. */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
		   SIS_R32(TxControl), SIS_R32(TxSts));

	/* Mask all interrupts while the ring is torn down. */
	SIS_W32(IntrMask, 0x0000);

	/* Serialize against the interrupt handler while clearing the
	 * TX descriptors. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* Reprogram the chip and restart the queue. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1575
1576static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1577{
1578 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1579}
1580
/*
 * Read the station address out of the chip's EEPROM.
 *
 * Returns 0 on success, -EIO when the EEPROM signature reads as
 * all-zeroes or all-ones (blank or missing EEPROM).
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* A blank/missing EEPROM reads back 0x0000 or 0xffff. */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		if (netif_msg_probe(tp))
			pr_info("%s: Error EEPROM read %x\n",
				pci_name(pdev), sig);
		return -EIO;
	}

	/* The MAC address is stored as three little-endian 16-bit
	 * words starting at EEPROMMACAddr. */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	/* The info word also says whether the PHY is RGMII-wired. */
	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1624 struct net_device *dev)
1625{
1626 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1627 struct sis190_private *tp = netdev_priv(dev);
1628 struct pci_dev *isa_bridge;
1629 u8 reg, tmp8;
1630 unsigned int i;
1631
1632 if (netif_msg_probe(tp))
1633 pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1634
1635 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1636 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1637 if (isa_bridge)
1638 break;
1639 }
1640
1641 if (!isa_bridge) {
1642 if (netif_msg_probe(tp))
1643 pr_info("%s: Can not find ISA bridge\n",
1644 pci_name(pdev));
1645 return -EIO;
1646 }
1647
1648
1649 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1650 reg = (tmp8 & ~0x02);
1651 pci_write_config_byte(isa_bridge, 0x48, reg);
1652 udelay(50);
1653 pci_read_config_byte(isa_bridge, 0x48, ®);
1654
1655 for (i = 0; i < MAC_ADDR_LEN; i++) {
1656 outb(0x9 + i, 0x78);
1657 dev->dev_addr[i] = inb(0x79);
1658 }
1659
1660 outb(0x12, 0x78);
1661 reg = inb(0x79);
1662
1663 sis190_set_rgmii(tp, reg);
1664
1665
1666 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1667 pci_dev_put(isa_bridge);
1668
1669 return 0;
1670}
1671
1672
1673
1674
1675
1676
1677
1678
/*
 * Program the station address into the RX MAC filter registers.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);

	/* Disable the packet filter bits (0x0f00) while the address is
	 * being rewritten; the saved control value is restored below. */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Write the MAC address one byte at a time. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the previous filter setting and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1700
1701static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1702 struct net_device *dev)
1703{
1704 int rc;
1705
1706 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1707 if (rc < 0) {
1708 u8 reg;
1709
1710 pci_read_config_byte(pdev, 0x73, ®);
1711
1712 if (reg & 0x00000001)
1713 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1714 }
1715 return rc;
1716}
1717
/*
 * Advertise all 10/100 abilities plus 1000FULL and restart
 * autonegotiation on the default PHY.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	/* Keep only the selector field of the current advertisement
	 * and enable every 10/100 half/full mode. */
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	/* Advertise 1000 Full Duplex. */
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	/* Reset the PHY and restart autonegotiation. */
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1742
1743static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1744{
1745 struct sis190_private *tp = netdev_priv(dev);
1746
1747 return mii_ethtool_gset(&tp->mii_if, cmd);
1748}
1749
1750static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1751{
1752 struct sis190_private *tp = netdev_priv(dev);
1753
1754 return mii_ethtool_sset(&tp->mii_if, cmd);
1755}
1756
1757static void sis190_get_drvinfo(struct net_device *dev,
1758 struct ethtool_drvinfo *info)
1759{
1760 struct sis190_private *tp = netdev_priv(dev);
1761
1762 strcpy(info->driver, DRV_NAME);
1763 strcpy(info->version, DRV_VERSION);
1764 strcpy(info->bus_info, pci_name(tp->pci_dev));
1765}
1766
/* ethtool get_regs_len: the register dump is the full mapped window. */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1771
1772static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1773 void *p)
1774{
1775 struct sis190_private *tp = netdev_priv(dev);
1776 unsigned long flags;
1777
1778 if (regs->len > SIS190_REGS_SIZE)
1779 regs->len = SIS190_REGS_SIZE;
1780
1781 spin_lock_irqsave(&tp->lock, flags);
1782 memcpy_fromio(p, tp->mmio_addr, regs->len);
1783 spin_unlock_irqrestore(&tp->lock, flags);
1784}
1785
1786static int sis190_nway_reset(struct net_device *dev)
1787{
1788 struct sis190_private *tp = netdev_priv(dev);
1789
1790 return mii_nway_restart(&tp->mii_if);
1791}
1792
1793static u32 sis190_get_msglevel(struct net_device *dev)
1794{
1795 struct sis190_private *tp = netdev_priv(dev);
1796
1797 return tp->msg_enable;
1798}
1799
1800static void sis190_set_msglevel(struct net_device *dev, u32 value)
1801{
1802 struct sis190_private *tp = netdev_priv(dev);
1803
1804 tp->msg_enable = value;
1805}
1806
/* ethtool entry points; link state uses the generic helper. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1818
1819static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1820{
1821 struct sis190_private *tp = netdev_priv(dev);
1822
1823 return !netif_running(dev) ? -EINVAL :
1824 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1825}
1826
/* net_device entry points; MTU/MAC handling uses the ethernet
 * generics. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list	= sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};
1841
/*
 * PCI probe: set up the board, obtain the MAC address, wire up the
 * net_device/ethtool operations, probe the MII bus and register the
 * interface.  Errors unwind through the goto ladder at the bottom.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Print the driver banner once, on the first probed device. */
	if (!printed_version) {
		if (netif_msg_drv(&debug))
			pr_info(SIS190_DRIVER_NAME " loaded\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    pci_name(pdev),
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, dev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
	}

	/* Start with carrier off; autonegotiation is kicked off below. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1914
/*
 * PCI remove: free the PHY list, make sure the deferred PHY work is
 * not running, then unregister and tear down the board.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	sis190_mii_remove(dev);
	cancel_work_sync(&tp->phy_task);
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1926
/* PCI glue: matches the IDs in sis190_pci_tbl. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1933
1934static int __init sis190_init_module(void)
1935{
1936 return pci_register_driver(&sis190_pci_driver);
1937}
1938
1939static void __exit sis190_cleanup_module(void)
1940{
1941 pci_unregister_driver(&sis190_pci_driver);
1942}
1943
/* Standard module load/unload hooks. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);
1946