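/* via-rhine.c: Linux device driver for the VIA Rhine family of PCI Fast
 * Ethernet controllers (see DRV_NAME and MODULE_DESCRIPTION below).
 */
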
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.0"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>
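
/* A few user-configurable values.
 * These may be modified when the driver module is loaded; see the
 * module_param()/MODULE_PARM_DESC() declarations further down.
 */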
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)
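
/* Copy-only-tiny-frames receive scheme: frames shorter than rx_copybreak are
 * copied into a freshly allocated skb in rhine_rx(). The architectures listed
 * below default to copying every frame (breakpoint 1518), typically because
 * unaligned receive buffers are costly there.
 */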
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif
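
/* Work-around for broken BIOSes: avoid_D3 keeps the chip out of ACPI power
 * state D3 at shutdown (see rhine_shutdown() and the module parameter below).
 */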
static bool avoid_D3;
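
/* Accept-all-multicast threshold: if more than this many multicast addresses
 * are configured, rhine_set_rx_mode() stops filtering per address and accepts
 * all multicast frames.
 */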
static const int multicast_filter_limit = 32;

#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10
#define RX_RING_SIZE	64

#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

static const char version[] __devinitconst =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32
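
/* Chip revision codes as read from PCI config space (pdev->revision); used in
 * rhine_init_one() to select the per-revision quirks below.
 */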
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,
	VT8233		= 0x60,
	VT8235		= 0x74,
	VT8237		= 0x78,
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,
};

enum rhine_quirks {
	rqWOL		= 0x0001,
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,
	rqStatusWBRace	= 0x0080,
	rqRhineI	= 0x0100,
};
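
/* Read back a harmless register to force preceding posted PCI/MMIO writes to
 * reach the chip.
 */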
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },
	{ }
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
282
283
284enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 ChipCmd1=0x09, TQWake=0x0A,
287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299};
300
301
302enum backoff_bits {
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
305};
306
307
308enum tcr_bits {
309 TCR_PQEN=0x01,
310 TCR_LB0=0x02,
311 TCR_LB1=0x04,
312 TCR_OFSET=0x08,
313 TCR_RTGOPT=0x10,
314 TCR_RTFT0=0x20,
315 TCR_RTFT1=0x40,
316 TCR_RTSF=0x80,
317};
318
319
320enum camcon_bits {
321 CAMC_CAMEN=0x01,
322 CAMC_VCAMSL=0x02,
323 CAMC_CAMWR=0x04,
324 CAMC_CAMRD=0x08,
325};
326
327
328enum bcr1_bits {
329 BCR1_POT0=0x01,
330 BCR1_POT1=0x02,
331 BCR1_POT2=0x04,
332 BCR1_CTFT0=0x08,
333 BCR1_CTFT1=0x10,
334 BCR1_CTSF=0x20,
335 BCR1_TXQNOBK=0x40,
336 BCR1_VIDFR=0x80,
337 BCR1_MED0=0x40,
338 BCR1_MED1=0x80,
339};
340
341#ifdef USE_MMIO
342
343static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 0
346};
347#endif
348
349
350enum intr_status_bits {
351 IntrRxDone = 0x0001,
352 IntrTxDone = 0x0002,
353 IntrRxErr = 0x0004,
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
356 IntrPCIErr = 0x0040,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000,
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
369 IntrTxUnderrun,
370};
371
372
373enum wol_bits {
374 WOLucast = 0x10,
375 WOLmagic = 0x20,
376 WOLbmcast = 0x30,
377 WOLlnkon = 0x40,
378 WOLlnkoff = 0x80,
379};
380
381
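/* Rx and Tx hardware descriptors: four little-endian 32-bit words each. The
 * DescOwn bit of the status word passes ownership between driver and chip,
 * and next_desc chains the descriptors into a ring.
 */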
382struct rx_desc {
383 __le32 rx_status;
384 __le32 desc_length;
385 __le32 addr;
386 __le32 next_desc;
387};
388struct tx_desc {
389 __le32 tx_status;
390 __le32 desc_length;
391 __le32 addr;
392 __le32 next_desc;
393};
394
395
396#define TXDESC 0x00e08000
397
398enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400};
401
402
403enum desc_status_bits {
404 DescOwn=0x80000000
405};
406
407
408enum desc_length_bits {
409 DescTag=0x00010000
410};
411
412
413enum chip_cmd_bits {
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418};
419
struct rhine_private {
	/* Bit mask of the currently configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved addresses of sent-in-place packets, for later unmap/free */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values, kept adjacent for cache effect */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};
468
469#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
470#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
471#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
472
473#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
474#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
475#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
476
477#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
478#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
479#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
480
481#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
482#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
483#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
484
485
486static int mdio_read(struct net_device *dev, int phy_id, int location);
487static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488static int rhine_open(struct net_device *dev);
489static void rhine_reset_task(struct work_struct *work);
490static void rhine_slow_event_task(struct work_struct *work);
491static void rhine_tx_timeout(struct net_device *dev);
492static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
493 struct net_device *dev);
494static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
495static void rhine_tx(struct net_device *dev);
496static int rhine_rx(struct net_device *dev, int limit);
497static void rhine_set_rx_mode(struct net_device *dev);
498static struct net_device_stats *rhine_get_stats(struct net_device *dev);
499static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static const struct ethtool_ops netdev_ethtool_ops;
501static int rhine_close(struct net_device *dev);
502static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
503static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
504static void rhine_restart_tx(struct net_device *dev);
505
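/* Poll a byte register until the bits in @mask read as zero
 * (rhine_wait_bit_low) or non-zero (rhine_wait_bit_high), waiting 10 us per
 * iteration and logging a debug message if the wait takes unusually long.
 */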
506static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
507{
508 void __iomem *ioaddr = rp->base;
509 int i;
510
511 for (i = 0; i < 1024; i++) {
512 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
513
514 if (low ^ has_mask_bits)
515 break;
516 udelay(10);
517 }
518 if (i > 64) {
519 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
520 "count: %04d\n", low ? "low" : "high", reg, mask, i);
521 }
522}
523
524static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
525{
526 rhine_wait_bit(rp, reg, mask, false);
527}
528
529static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
530{
531 rhine_wait_bit(rp, reg, mask, true);
532}
533
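/* Collect pending interrupt events. Chips with the rqStatusWBRace quirk
 * report some Tx events in a separate IntrStatus2 byte; fold it into bits
 * 16-23 (where IntrTxDescRace lives) so callers see a single event word.
 */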
534static u32 rhine_get_events(struct rhine_private *rp)
535{
536 void __iomem *ioaddr = rp->base;
537 u32 intr_status;
538
539 intr_status = ioread16(ioaddr + IntrStatus);
540
541 if (rp->quirks & rqStatusWBRace)
542 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
543 return intr_status;
544}
545
546static void rhine_ack_events(struct rhine_private *rp, u32 mask)
547{
548 void __iomem *ioaddr = rp->base;
549
550 if (rp->quirks & rqStatusWBRace)
551 iowrite8(mask >> 16, ioaddr + IntrStatus2);
552 iowrite16(mask, ioaddr + IntrStatus);
553 mmiowb();
554}
555
556
557
558
559
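/*
 * On chips with Wake-On-LAN support (rqWOL), clear the sticky power/WOL
 * status bits left over from a wake-up and report why the system was woken.
 */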
560static void rhine_power_init(struct net_device *dev)
561{
562 struct rhine_private *rp = netdev_priv(dev);
563 void __iomem *ioaddr = rp->base;
564 u16 wolstat;
565
566 if (rp->quirks & rqWOL) {
567
568 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
569
570
571 iowrite8(0x80, ioaddr + WOLcgClr);
572
573
574 iowrite8(0xFF, ioaddr + WOLcrClr);
575
576 if (rp->quirks & rq6patterns)
577 iowrite8(0x03, ioaddr + WOLcrClr1);
578
579
580 wolstat = ioread8(ioaddr + PwrcsrSet);
581 if (rp->quirks & rq6patterns)
582 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
583
584
585 iowrite8(0xFF, ioaddr + PwrcsrClr);
586 if (rp->quirks & rq6patterns)
587 iowrite8(0x03, ioaddr + PwrcsrClr1);
588
589 if (wolstat) {
590 char *reason;
591 switch (wolstat) {
592 case WOLmagic:
593 reason = "Magic packet";
594 break;
595 case WOLlnkon:
596 reason = "Link went up";
597 break;
598 case WOLlnkoff:
599 reason = "Link went down";
600 break;
601 case WOLucast:
602 reason = "Unicast packet";
603 break;
604 case WOLbmcast:
605 reason = "Multicast/broadcast packet";
606 break;
607 default:
608 reason = "Unknown";
609 }
610 netdev_info(dev, "Woke system up. Reason: %s\n",
611 reason);
612 }
613 }
614}
615
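/* Soft-reset the chip. If the reset bit does not clear by itself, force the
 * reset through MiscCmd on chips that support it (rqForceReset) and wait for
 * completion again.
 */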
616static void rhine_chip_reset(struct net_device *dev)
617{
618 struct rhine_private *rp = netdev_priv(dev);
619 void __iomem *ioaddr = rp->base;
620 u8 cmd1;
621
622 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
623 IOSYNC;
624
625 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
626 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
627
628
629 if (rp->quirks & rqForceReset)
630 iowrite8(0x40, ioaddr + MiscCmd);
631
632
633 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
634 }
635
636 cmd1 = ioread8(ioaddr + ChipCmd1);
637 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
638 "failed" : "succeeded");
639}
640
641#ifdef USE_MMIO
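/* Enable memory-mapped access to the registers; the enable bit lives in
 * ConfigA on Rhine-I and in ConfigD on later chips.
 */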
642static void enable_mmio(long pioaddr, u32 quirks)
643{
644 int n;
645 if (quirks & rqRhineI) {
646
647 n = inb(pioaddr + ConfigA) | 0x20;
648 outb(n, pioaddr + ConfigA);
649 } else {
650 n = inb(pioaddr + ConfigD) | 0x80;
651 outb(n, pioaddr + ConfigD);
652 }
653}
654#endif
655
656
657
658
659
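/*
 * Ask the chip to reload its configuration and station address from the
 * external EEPROM. The reload overwrites the ConfigA-D registers (and with
 * them the MMIO enable bit), so enable_mmio() is called again afterwards; on
 * WOL-capable chips the EEPROM-controlled wake-up bits in ConfigA are cleared
 * as well.
 */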
660static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
661{
662 struct rhine_private *rp = netdev_priv(dev);
663 void __iomem *ioaddr = rp->base;
664 int i;
665
666 outb(0x20, pioaddr + MACRegEEcsr);
667 for (i = 0; i < 1024; i++) {
668 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
669 break;
670 }
671 if (i > 512)
672 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
673
674#ifdef USE_MMIO
675
676
677
678
679
680 enable_mmio(pioaddr, rp->quirks);
681#endif
682
683
684 if (rp->quirks & rqWOL)
685 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
686
687}
688
689#ifdef CONFIG_NET_POLL_CONTROLLER
690static void rhine_poll(struct net_device *dev)
691{
692 struct rhine_private *rp = netdev_priv(dev);
693 const int irq = rp->pdev->irq;
694
695 disable_irq(irq);
696 rhine_interrupt(irq, dev);
697 enable_irq(irq);
698}
699#endif
700
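/* Raise the Tx FIFO drain threshold by one step (0x20), capped at 0xe0;
 * called after transmit underruns and unspecified Tx errors.
 */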
701static void rhine_kick_tx_threshold(struct rhine_private *rp)
702{
703 if (rp->tx_thresh < 0xe0) {
704 void __iomem *ioaddr = rp->base;
705
706 rp->tx_thresh += 0x20;
707 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
708 }
709}
710
711static void rhine_tx_err(struct rhine_private *rp, u32 status)
712{
713 struct net_device *dev = rp->dev;
714
715 if (status & IntrTxAborted) {
716 netif_info(rp, tx_err, dev,
717 "Abort %08x, frame dropped\n", status);
718 }
719
720 if (status & IntrTxUnderrun) {
721 rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
724 }
725
726 if (status & IntrTxDescRace)
727 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
728
729 if ((status & IntrTxError) &&
730 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
731 rhine_kick_tx_threshold(rp);
732 netif_info(rp, tx_err, dev, "Unspecified error. "
733 "Tx threshold now %02x\n", rp->tx_thresh);
734 }
735
736 rhine_restart_tx(dev);
737}
738
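/* Fold the chip's CRC-error and missed-packet tally counters into the netdev
 * statistics and clear the hardware counters.
 */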
static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
740{
741 void __iomem *ioaddr = rp->base;
742 struct net_device_stats *stats = &rp->dev->stats;
743
744 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
745 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
746
747
748
749
750
751
752
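	/* Depending on the chip revision the counters appear to clear on a
	 * write of zero or on the read-back itself, so do both.
	 */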
753 iowrite32(0, ioaddr + RxMissed);
754 ioread16(ioaddr + RxCRCErrs);
755 ioread16(ioaddr + RxMissed);
756}
757
758#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
759 IntrRxErr | \
760 IntrRxEmpty | \
761 IntrRxOverflow | \
762 IntrRxDropped | \
763 IntrRxNoBuf | \
764 IntrRxWakeUp)
765
766#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
767 IntrTxAborted | \
768 IntrTxUnderrun | \
769 IntrTxDescRace)
770#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
771
772#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
773 RHINE_EVENT_NAPI_TX | \
774 IntrStatsMax)
775#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
776#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
777
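/*
 * NAPI poll handler: acknowledge and service Rx/Tx events within the budget,
 * defer the rare "slow" events (PCI error, link change) to a workqueue, and
 * re-enable interrupts once the budget was not exhausted.
 */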
778static int rhine_napipoll(struct napi_struct *napi, int budget)
779{
780 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
781 struct net_device *dev = rp->dev;
782 void __iomem *ioaddr = rp->base;
783 u16 enable_mask = RHINE_EVENT & 0xffff;
784 int work_done = 0;
785 u32 status;
786
787 status = rhine_get_events(rp);
788 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
789
790 if (status & RHINE_EVENT_NAPI_RX)
791 work_done += rhine_rx(dev, budget);
792
793 if (status & RHINE_EVENT_NAPI_TX) {
794 if (status & RHINE_EVENT_NAPI_TX_ERR) {
795
796 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
797 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
798 netif_warn(rp, tx_err, dev, "Tx still on\n");
799 }
800
801 rhine_tx(dev);
802
803 if (status & RHINE_EVENT_NAPI_TX_ERR)
804 rhine_tx_err(rp, status);
805 }
806
807 if (status & IntrStatsMax) {
808 spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
810 spin_unlock(&rp->lock);
811 }
812
813 if (status & RHINE_EVENT_SLOW) {
814 enable_mask &= ~RHINE_EVENT_SLOW;
815 schedule_work(&rp->slow_event_task);
816 }
817
818 if (work_done < budget) {
819 napi_complete(napi);
820 iowrite16(enable_mask, ioaddr + IntrEnable);
821 mmiowb();
822 }
823 return work_done;
824}
825
826static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
827{
828 struct rhine_private *rp = netdev_priv(dev);
829
830
831 rhine_chip_reset(dev);
832
833
834 if (rp->quirks & rqRhineI)
835 msleep(5);
836
837
838 rhine_reload_eeprom(pioaddr, dev);
839}
840
841static const struct net_device_ops rhine_netdev_ops = {
842 .ndo_open = rhine_open,
843 .ndo_stop = rhine_close,
844 .ndo_start_xmit = rhine_start_tx,
845 .ndo_get_stats = rhine_get_stats,
846 .ndo_set_rx_mode = rhine_set_rx_mode,
847 .ndo_change_mtu = eth_change_mtu,
848 .ndo_validate_addr = eth_validate_addr,
849 .ndo_set_mac_address = eth_mac_addr,
850 .ndo_do_ioctl = netdev_ioctl,
851 .ndo_tx_timeout = rhine_tx_timeout,
852 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
853 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
854#ifdef CONFIG_NET_POLL_CONTROLLER
855 .ndo_poll_controller = rhine_poll,
856#endif
857};
858
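/*
 * PCI probe: identify the chip revision and its quirks, map the registers
 * (PIO or MMIO), read the station address, set up MII and NAPI and register
 * the net_device.
 */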
859static int __devinit rhine_init_one(struct pci_dev *pdev,
860 const struct pci_device_id *ent)
861{
862 struct net_device *dev;
863 struct rhine_private *rp;
864 int i, rc;
865 u32 quirks;
866 long pioaddr;
867 long memaddr;
868 void __iomem *ioaddr;
869 int io_size, phy_id;
870 const char *name;
871#ifdef USE_MMIO
872 int bar = 1;
873#else
874 int bar = 0;
875#endif
876
877
878#ifndef MODULE
879 pr_info_once("%s\n", version);
880#endif
881
882 io_size = 256;
883 phy_id = 0;
884 quirks = 0;
885 name = "Rhine";
886 if (pdev->revision < VTunknown0) {
887 quirks = rqRhineI;
888 io_size = 128;
	} else if (pdev->revision >= VT6102) {
891 quirks = rqWOL | rqForceReset;
892 if (pdev->revision < VT6105) {
893 name = "Rhine II";
894 quirks |= rqStatusWBRace;
		} else {
897 phy_id = 1;
898 if (pdev->revision >= VT6105_B0)
899 quirks |= rq6patterns;
900 if (pdev->revision < VT6105M)
901 name = "Rhine III";
902 else
903 name = "Rhine III (Management Adapter)";
904 }
905 }
906
907 rc = pci_enable_device(pdev);
908 if (rc)
909 goto err_out;
910
911
912 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
913 if (rc) {
914 dev_err(&pdev->dev,
915 "32-bit PCI DMA addresses not supported by the card!?\n");
916 goto err_out;
917 }
918
919
920 if ((pci_resource_len(pdev, 0) < io_size) ||
921 (pci_resource_len(pdev, 1) < io_size)) {
922 rc = -EIO;
923 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
924 goto err_out;
925 }
926
927 pioaddr = pci_resource_start(pdev, 0);
928 memaddr = pci_resource_start(pdev, 1);
929
930 pci_set_master(pdev);
931
932 dev = alloc_etherdev(sizeof(struct rhine_private));
933 if (!dev) {
934 rc = -ENOMEM;
935 goto err_out;
936 }
937 SET_NETDEV_DEV(dev, &pdev->dev);
938
939 rp = netdev_priv(dev);
940 rp->dev = dev;
941 rp->quirks = quirks;
942 rp->pioaddr = pioaddr;
943 rp->pdev = pdev;
944 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
945
946 rc = pci_request_regions(pdev, DRV_NAME);
947 if (rc)
948 goto err_out_free_netdev;
949
950 ioaddr = pci_iomap(pdev, bar, io_size);
951 if (!ioaddr) {
952 rc = -EIO;
953 dev_err(&pdev->dev,
954 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
955 pci_name(pdev), io_size, memaddr);
956 goto err_out_free_res;
957 }
958
959#ifdef USE_MMIO
960 enable_mmio(pioaddr, quirks);
961
962
963 i = 0;
964 while (mmio_verify_registers[i]) {
965 int reg = mmio_verify_registers[i++];
966 unsigned char a = inb(pioaddr+reg);
967 unsigned char b = readb(ioaddr+reg);
968 if (a != b) {
969 rc = -EIO;
970 dev_err(&pdev->dev,
971 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
972 reg, a, b);
973 goto err_out_unmap;
974 }
975 }
976#endif
977
978 rp->base = ioaddr;
979
980
981 rhine_power_init(dev);
982 rhine_hw_init(dev, pioaddr);
983
984 for (i = 0; i < 6; i++)
985 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
986
987 if (!is_valid_ether_addr(dev->dev_addr)) {
988
989 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
990 eth_hw_addr_random(dev);
991 netdev_info(dev, "Using random MAC address: %pM\n",
992 dev->dev_addr);
993 }
994 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
995
996
997 if (!phy_id)
998 phy_id = ioread8(ioaddr + 0x6C);
999
1000 spin_lock_init(&rp->lock);
1001 mutex_init(&rp->task_lock);
1002 INIT_WORK(&rp->reset_task, rhine_reset_task);
1003 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1004
1005 rp->mii_if.dev = dev;
1006 rp->mii_if.mdio_read = mdio_read;
1007 rp->mii_if.mdio_write = mdio_write;
1008 rp->mii_if.phy_id_mask = 0x1f;
1009 rp->mii_if.reg_num_mask = 0x1f;
1010
1011
1012 dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
1014 dev->watchdog_timeo = TX_TIMEOUT;
1015
1016 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1017
1018 if (rp->quirks & rqRhineI)
1019 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1020
1021 if (pdev->revision >= VT6105M)
1022 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1023 NETIF_F_HW_VLAN_FILTER;
1024
1025
1026 rc = register_netdev(dev);
1027 if (rc)
1028 goto err_out_unmap;
1029
1030 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1031 name,
1032#ifdef USE_MMIO
1033 memaddr,
1034#else
1035 (long)ioaddr,
1036#endif
1037 dev->dev_addr, pdev->irq);
1038
1039 pci_set_drvdata(pdev, dev);
1040
1041 {
1042 u16 mii_cmd;
1043 int mii_status = mdio_read(dev, phy_id, 1);
1044 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1045 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1046 if (mii_status != 0xffff && mii_status != 0x0000) {
1047 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1048 netdev_info(dev,
1049 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1050 phy_id,
1051 mii_status, rp->mii_if.advertising,
1052 mdio_read(dev, phy_id, 5));
1053
1054
1055 if (mii_status & BMSR_LSTATUS)
1056 netif_carrier_on(dev);
1057 else
1058 netif_carrier_off(dev);
1059
1060 }
1061 }
1062 rp->mii_if.phy_id = phy_id;
1063 if (avoid_D3)
1064 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1065
1066 return 0;
1067
1068err_out_unmap:
1069 pci_iounmap(pdev, ioaddr);
1070err_out_free_res:
1071 pci_release_regions(pdev);
1072err_out_free_netdev:
1073 free_netdev(dev);
1074err_out:
1075 return rc;
1076}
1077
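/* Allocate the Rx and Tx descriptor rings in one coherent DMA block, plus the
 * Tx bounce buffers that Rhine-I chips need for unaligned transmits.
 */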
1078static int alloc_ring(struct net_device* dev)
1079{
1080 struct rhine_private *rp = netdev_priv(dev);
1081 void *ring;
1082 dma_addr_t ring_dma;
1083
1084 ring = pci_alloc_consistent(rp->pdev,
1085 RX_RING_SIZE * sizeof(struct rx_desc) +
1086 TX_RING_SIZE * sizeof(struct tx_desc),
1087 &ring_dma);
1088 if (!ring) {
1089 netdev_err(dev, "Could not allocate DMA memory\n");
1090 return -ENOMEM;
1091 }
1092 if (rp->quirks & rqRhineI) {
1093 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1094 PKT_BUF_SZ * TX_RING_SIZE,
1095 &rp->tx_bufs_dma);
1096 if (rp->tx_bufs == NULL) {
1097 pci_free_consistent(rp->pdev,
1098 RX_RING_SIZE * sizeof(struct rx_desc) +
1099 TX_RING_SIZE * sizeof(struct tx_desc),
1100 ring, ring_dma);
1101 return -ENOMEM;
1102 }
1103 }
1104
1105 rp->rx_ring = ring;
1106 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1107 rp->rx_ring_dma = ring_dma;
1108 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1109
1110 return 0;
1111}
1112
1113static void free_ring(struct net_device* dev)
1114{
1115 struct rhine_private *rp = netdev_priv(dev);
1116
1117 pci_free_consistent(rp->pdev,
1118 RX_RING_SIZE * sizeof(struct rx_desc) +
1119 TX_RING_SIZE * sizeof(struct tx_desc),
1120 rp->rx_ring, rp->rx_ring_dma);
1121 rp->tx_ring = NULL;
1122
1123 if (rp->tx_bufs)
1124 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1125 rp->tx_bufs, rp->tx_bufs_dma);
1126
1127 rp->tx_bufs = NULL;
1128
1129}
1130
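/* Initialize the Rx ring as a circular chain of descriptors, allocate an skb
 * for each entry, map it for DMA and hand ownership to the chip (DescOwn).
 */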
1131static void alloc_rbufs(struct net_device *dev)
1132{
1133 struct rhine_private *rp = netdev_priv(dev);
1134 dma_addr_t next;
1135 int i;
1136
1137 rp->dirty_rx = rp->cur_rx = 0;
1138
1139 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1140 rp->rx_head_desc = &rp->rx_ring[0];
1141 next = rp->rx_ring_dma;
1142
1143
1144 for (i = 0; i < RX_RING_SIZE; i++) {
1145 rp->rx_ring[i].rx_status = 0;
1146 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1147 next += sizeof(struct rx_desc);
1148 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1149 rp->rx_skbuff[i] = NULL;
1150 }
1151
1152 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1153
1154
1155 for (i = 0; i < RX_RING_SIZE; i++) {
1156 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1157 rp->rx_skbuff[i] = skb;
1158 if (skb == NULL)
1159 break;
1160
1161 rp->rx_skbuff_dma[i] =
1162 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1163 PCI_DMA_FROMDEVICE);
1164
1165 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1166 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1167 }
1168 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1169}
1170
1171static void free_rbufs(struct net_device* dev)
1172{
1173 struct rhine_private *rp = netdev_priv(dev);
1174 int i;
1175
1176
1177 for (i = 0; i < RX_RING_SIZE; i++) {
1178 rp->rx_ring[i].rx_status = 0;
1179 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1180 if (rp->rx_skbuff[i]) {
1181 pci_unmap_single(rp->pdev,
1182 rp->rx_skbuff_dma[i],
1183 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1184 dev_kfree_skb(rp->rx_skbuff[i]);
1185 }
1186 rp->rx_skbuff[i] = NULL;
1187 }
1188}
1189
1190static void alloc_tbufs(struct net_device* dev)
1191{
1192 struct rhine_private *rp = netdev_priv(dev);
1193 dma_addr_t next;
1194 int i;
1195
1196 rp->dirty_tx = rp->cur_tx = 0;
1197 next = rp->tx_ring_dma;
1198 for (i = 0; i < TX_RING_SIZE; i++) {
1199 rp->tx_skbuff[i] = NULL;
1200 rp->tx_ring[i].tx_status = 0;
1201 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1202 next += sizeof(struct tx_desc);
1203 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1204 if (rp->quirks & rqRhineI)
1205 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1206 }
1207 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1208
1209}
1210
1211static void free_tbufs(struct net_device* dev)
1212{
1213 struct rhine_private *rp = netdev_priv(dev);
1214 int i;
1215
1216 for (i = 0; i < TX_RING_SIZE; i++) {
1217 rp->tx_ring[i].tx_status = 0;
1218 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1219 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1220 if (rp->tx_skbuff[i]) {
1221 if (rp->tx_skbuff_dma[i]) {
1222 pci_unmap_single(rp->pdev,
1223 rp->tx_skbuff_dma[i],
1224 rp->tx_skbuff[i]->len,
1225 PCI_DMA_TODEVICE);
1226 }
1227 dev_kfree_skb(rp->tx_skbuff[i]);
1228 }
1229 rp->tx_skbuff[i] = NULL;
1230 rp->tx_buf[i] = NULL;
1231 }
1232}
1233
1234static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1235{
1236 struct rhine_private *rp = netdev_priv(dev);
1237 void __iomem *ioaddr = rp->base;
1238
1239 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1240
1241 if (rp->mii_if.full_duplex)
1242 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1243 ioaddr + ChipCmd1);
1244 else
1245 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1246 ioaddr + ChipCmd1);
1247
1248 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1249 rp->mii_if.force_media, netif_carrier_ok(dev));
1250}
1251
1252
1253static void rhine_set_carrier(struct mii_if_info *mii)
1254{
1255 struct net_device *dev = mii->dev;
1256 struct rhine_private *rp = netdev_priv(dev);
1257
1258 if (mii->force_media) {
1259
1260 if (!netif_carrier_ok(dev))
1261 netif_carrier_on(dev);
1262 } else
1263 rhine_check_media(dev, 0);
1264
1265 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1266 mii->force_media, netif_carrier_ok(dev));
1267}
1268
1269
1270
1271
1272
1273
1274
1275
1276
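/* Program one entry of the multicast address CAM: enable CAM access, select
 * the entry, write the six address bytes and latch them with a CAM write.
 */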
1277static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1278{
1279 int i;
1280
1281 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1282 wmb();
1283
1284
1285 idx &= (MCAM_SIZE - 1);
1286
1287 iowrite8((u8) idx, ioaddr + CamAddr);
1288
1289 for (i = 0; i < 6; i++, addr++)
1290 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1291 udelay(10);
1292 wmb();
1293
1294 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1295 udelay(10);
1296
1297 iowrite8(0, ioaddr + CamCon);
1298}
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1309{
1310 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1311 wmb();
1312
1313
1314 idx &= (VCAM_SIZE - 1);
1315
1316 iowrite8((u8) idx, ioaddr + CamAddr);
1317
1318 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1319 udelay(10);
1320 wmb();
1321
1322 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1323 udelay(10);
1324
1325 iowrite8(0, ioaddr + CamCon);
1326}
1327
1328
1329
1330
1331
1332
1333
1334
1335static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1336{
1337 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1338 wmb();
1339
1340
1341 iowrite32(mask, ioaddr + CamMask);
1342
1343
1344 iowrite8(0, ioaddr + CamCon);
1345}
1346
1347
1348
1349
1350
1351
1352
1353
1354static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1355{
1356 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1357 wmb();
1358
1359
1360 iowrite32(mask, ioaddr + CamMask);
1361
1362
1363 iowrite8(0, ioaddr + CamCon);
1364}
1365
1366
1367
1368
1369
1370
1371
1372
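/* Initialize the CAM-based filters on VT6105M chips: mask off every multicast
 * and VLAN CAM entry and program the initial TxConfig/PCIBusConfig1 tagging
 * bits (see init_registers()).
 */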
1373static void rhine_init_cam_filter(struct net_device *dev)
1374{
1375 struct rhine_private *rp = netdev_priv(dev);
1376 void __iomem *ioaddr = rp->base;
1377
1378
1379 rhine_set_vlan_cam_mask(ioaddr, 0);
1380 rhine_set_cam_mask(ioaddr, 0);
1381
1382
1383 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1384 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1385}
1386
1387
1388
1389
1390
1391
1392
1393static void rhine_update_vcam(struct net_device *dev)
1394{
1395 struct rhine_private *rp = netdev_priv(dev);
1396 void __iomem *ioaddr = rp->base;
1397 u16 vid;
1398 u32 vCAMmask = 0;
1399 unsigned int i = 0;
1400
1401 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1402 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1403 vCAMmask |= 1 << i;
1404 if (++i >= VCAM_SIZE)
1405 break;
1406 }
1407 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1408}
1409
1410static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1411{
1412 struct rhine_private *rp = netdev_priv(dev);
1413
1414 spin_lock_bh(&rp->lock);
1415 set_bit(vid, rp->active_vlans);
1416 rhine_update_vcam(dev);
1417 spin_unlock_bh(&rp->lock);
1418 return 0;
1419}
1420
1421static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1422{
1423 struct rhine_private *rp = netdev_priv(dev);
1424
1425 spin_lock_bh(&rp->lock);
1426 clear_bit(vid, rp->active_vlans);
1427 rhine_update_vcam(dev);
1428 spin_unlock_bh(&rp->lock);
1429 return 0;
1430}
1431
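/* Program the station address, PCI bus configuration, FIFO thresholds and the
 * descriptor ring base addresses, then enable NAPI, interrupts and the Rx/Tx
 * engines.
 */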
1432static void init_registers(struct net_device *dev)
1433{
1434 struct rhine_private *rp = netdev_priv(dev);
1435 void __iomem *ioaddr = rp->base;
1436 int i;
1437
1438 for (i = 0; i < 6; i++)
1439 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1440
1441
1442 iowrite16(0x0006, ioaddr + PCIBusConfig);
1443
1444 iowrite8(0x20, ioaddr + TxConfig);
1445 rp->tx_thresh = 0x20;
1446 rp->rx_thresh = 0x60;
1447
1448 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1449 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1450
1451 rhine_set_rx_mode(dev);
1452
1453 if (rp->pdev->revision >= VT6105M)
1454 rhine_init_cam_filter(dev);
1455
1456 napi_enable(&rp->napi);
1457
1458 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1459
1460 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1461 ioaddr + ChipCmd);
1462 rhine_check_media(dev, 1);
1463}
1464
1465
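/* The chip can poll the PHY's BMSR register on its own to track link state.
 * This link monitor has to be stopped around manual MDIO accesses
 * (rhine_disable_linkmon()) and restarted afterwards (rhine_enable_linkmon()).
 */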
1466static void rhine_enable_linkmon(struct rhine_private *rp)
1467{
1468 void __iomem *ioaddr = rp->base;
1469
1470 iowrite8(0, ioaddr + MIICmd);
1471 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1472 iowrite8(0x80, ioaddr + MIICmd);
1473
1474 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1475
1476 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1477}
1478
1479
1480static void rhine_disable_linkmon(struct rhine_private *rp)
1481{
1482 void __iomem *ioaddr = rp->base;
1483
1484 iowrite8(0, ioaddr + MIICmd);
1485
1486 if (rp->quirks & rqRhineI) {
1487 iowrite8(0x01, ioaddr + MIIRegAddr);
1488
1489
1490 mdelay(1);
1491
1492
1493 iowrite8(0x80, ioaddr + MIICmd);
1494
1495 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1496
1497
1498 iowrite8(0, ioaddr + MIICmd);
1499 }
1500 else
1501 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1502}
1503
1504
1505
1506static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1507{
1508 struct rhine_private *rp = netdev_priv(dev);
1509 void __iomem *ioaddr = rp->base;
1510 int result;
1511
1512 rhine_disable_linkmon(rp);
1513
1514
1515 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1516 iowrite8(regnum, ioaddr + MIIRegAddr);
1517 iowrite8(0x40, ioaddr + MIICmd);
1518 rhine_wait_bit_low(rp, MIICmd, 0x40);
1519 result = ioread16(ioaddr + MIIData);
1520
1521 rhine_enable_linkmon(rp);
1522 return result;
1523}
1524
1525static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1526{
1527 struct rhine_private *rp = netdev_priv(dev);
1528 void __iomem *ioaddr = rp->base;
1529
1530 rhine_disable_linkmon(rp);
1531
1532
1533 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1534 iowrite8(regnum, ioaddr + MIIRegAddr);
1535 iowrite16(value, ioaddr + MIIData);
1536 iowrite8(0x20, ioaddr + MIICmd);
1537 rhine_wait_bit_low(rp, MIICmd, 0x20);
1538
1539 rhine_enable_linkmon(rp);
1540}
1541
1542static void rhine_task_disable(struct rhine_private *rp)
1543{
1544 mutex_lock(&rp->task_lock);
1545 rp->task_enable = false;
1546 mutex_unlock(&rp->task_lock);
1547
1548 cancel_work_sync(&rp->slow_event_task);
1549 cancel_work_sync(&rp->reset_task);
1550}
1551
1552static void rhine_task_enable(struct rhine_private *rp)
1553{
1554 mutex_lock(&rp->task_lock);
1555 rp->task_enable = true;
1556 mutex_unlock(&rp->task_lock);
1557}
1558
1559static int rhine_open(struct net_device *dev)
1560{
1561 struct rhine_private *rp = netdev_priv(dev);
1562 void __iomem *ioaddr = rp->base;
1563 int rc;
1564
1565 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1566 dev);
1567 if (rc)
1568 return rc;
1569
1570 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1571
1572 rc = alloc_ring(dev);
1573 if (rc) {
1574 free_irq(rp->pdev->irq, dev);
1575 return rc;
1576 }
1577 alloc_rbufs(dev);
1578 alloc_tbufs(dev);
1579 rhine_chip_reset(dev);
1580 rhine_task_enable(rp);
1581 init_registers(dev);
1582
1583 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1584 __func__, ioread16(ioaddr + ChipCmd),
1585 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1586
1587 netif_start_queue(dev);
1588
1589 return 0;
1590}
1591
1592static void rhine_reset_task(struct work_struct *work)
1593{
1594 struct rhine_private *rp = container_of(work, struct rhine_private,
1595 reset_task);
1596 struct net_device *dev = rp->dev;
1597
1598 mutex_lock(&rp->task_lock);
1599
1600 if (!rp->task_enable)
1601 goto out_unlock;
1602
1603 napi_disable(&rp->napi);
1604 spin_lock_bh(&rp->lock);
1605
1606
1607 free_tbufs(dev);
1608 free_rbufs(dev);
1609 alloc_tbufs(dev);
1610 alloc_rbufs(dev);
1611
1612
1613 rhine_chip_reset(dev);
1614 init_registers(dev);
1615
1616 spin_unlock_bh(&rp->lock);
1617
1618 dev->trans_start = jiffies;
1619 dev->stats.tx_errors++;
1620 netif_wake_queue(dev);
1621
1622out_unlock:
1623 mutex_unlock(&rp->task_lock);
1624}
1625
1626static void rhine_tx_timeout(struct net_device *dev)
1627{
1628 struct rhine_private *rp = netdev_priv(dev);
1629 void __iomem *ioaddr = rp->base;
1630
1631 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1632 ioread16(ioaddr + IntrStatus),
1633 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1634
1635 schedule_work(&rp->reset_task);
1636}
1637
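/*
 * Queue one skb for transmission. Rhine-I chips cannot transmit from
 * unaligned or fragmented buffers and cannot checksum in hardware, so such
 * frames are copied (with checksum) into a preallocated bounce buffer;
 * everything else is DMA-mapped in place.
 */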
1638static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1639 struct net_device *dev)
1640{
1641 struct rhine_private *rp = netdev_priv(dev);
1642 void __iomem *ioaddr = rp->base;
1643 unsigned entry;
1644
1645
1646
1647
1648
1649 entry = rp->cur_tx % TX_RING_SIZE;
1650
1651 if (skb_padto(skb, ETH_ZLEN))
1652 return NETDEV_TX_OK;
1653
1654 rp->tx_skbuff[entry] = skb;
1655
1656 if ((rp->quirks & rqRhineI) &&
1657 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1658
1659 if (skb->len > PKT_BUF_SZ) {
1660
1661 dev_kfree_skb(skb);
1662 rp->tx_skbuff[entry] = NULL;
1663 dev->stats.tx_dropped++;
1664 return NETDEV_TX_OK;
1665 }
1666
1667
1668 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1669 if (skb->len < ETH_ZLEN)
1670 memset(rp->tx_buf[entry] + skb->len, 0,
1671 ETH_ZLEN - skb->len);
1672 rp->tx_skbuff_dma[entry] = 0;
1673 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1674 (rp->tx_buf[entry] -
1675 rp->tx_bufs));
1676 } else {
1677 rp->tx_skbuff_dma[entry] =
1678 pci_map_single(rp->pdev, skb->data, skb->len,
1679 PCI_DMA_TODEVICE);
1680 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1681 }
1682
1683 rp->tx_ring[entry].desc_length =
1684 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1685
1686 if (unlikely(vlan_tx_tag_present(skb))) {
1687 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1688
1689 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1690 }
1691 else
1692 rp->tx_ring[entry].tx_status = 0;
1693
1694
1695 wmb();
1696 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1697 wmb();
1698
1699 rp->cur_tx++;
1700
1701
1702
1703 if (vlan_tx_tag_present(skb))
1704
1705 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1706
1707
1708 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1709 ioaddr + ChipCmd1);
1710 IOSYNC;
1711
1712 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1713 netif_stop_queue(dev);
1714
1715 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1716 rp->cur_tx - 1, entry);
1717
1718 return NETDEV_TX_OK;
1719}
1720
1721static void rhine_irq_disable(struct rhine_private *rp)
1722{
1723 iowrite16(0x0000, rp->base + IntrEnable);
1724 mmiowb();
1725}
1726
1727
1728
1729static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1730{
1731 struct net_device *dev = dev_instance;
1732 struct rhine_private *rp = netdev_priv(dev);
1733 u32 status;
1734 int handled = 0;
1735
1736 status = rhine_get_events(rp);
1737
1738 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1739
1740 if (status & RHINE_EVENT) {
1741 handled = 1;
1742
1743 rhine_irq_disable(rp);
1744 napi_schedule(&rp->napi);
1745 }
1746
1747 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1748 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1749 status);
1750 }
1751
1752 return IRQ_RETVAL(handled);
1753}
1754
1755
1756
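/* Reclaim completed Tx descriptors: record error and collision statistics,
 * unmap and free the skbs, and wake the queue once enough entries are free.
 */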
1757static void rhine_tx(struct net_device *dev)
1758{
1759 struct rhine_private *rp = netdev_priv(dev);
1760 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1761
1762
1763 while (rp->dirty_tx != rp->cur_tx) {
1764 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1765 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1766 entry, txstatus);
1767 if (txstatus & DescOwn)
1768 break;
1769 if (txstatus & 0x8000) {
1770 netif_dbg(rp, tx_done, dev,
1771 "Transmit error, Tx status %08x\n", txstatus);
1772 dev->stats.tx_errors++;
1773 if (txstatus & 0x0400)
1774 dev->stats.tx_carrier_errors++;
1775 if (txstatus & 0x0200)
1776 dev->stats.tx_window_errors++;
1777 if (txstatus & 0x0100)
1778 dev->stats.tx_aborted_errors++;
1779 if (txstatus & 0x0080)
1780 dev->stats.tx_heartbeat_errors++;
1781 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1782 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1783 dev->stats.tx_fifo_errors++;
1784 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1785 break;
1786 }
1787
1788 } else {
1789 if (rp->quirks & rqRhineI)
1790 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1791 else
1792 dev->stats.collisions += txstatus & 0x0F;
1793 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1794 (txstatus >> 3) & 0xF, txstatus & 0xF);
1795 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1796 dev->stats.tx_packets++;
1797 }
1798
1799 if (rp->tx_skbuff_dma[entry]) {
1800 pci_unmap_single(rp->pdev,
1801 rp->tx_skbuff_dma[entry],
1802 rp->tx_skbuff[entry]->len,
1803 PCI_DMA_TODEVICE);
1804 }
1805 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1806 rp->tx_skbuff[entry] = NULL;
1807 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1808 }
1809 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1810 netif_wake_queue(dev);
1811}
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
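/*
 * The chip stores the received VLAN TCI after the packet data, at the next
 * 4-byte boundary plus 2 bytes; extract it in host byte order.
 */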
1822static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1823{
1824 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1825 return be16_to_cpup((__be16 *)trailer);
1826}
1827
1828
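/*
 * Process up to @limit received frames. Frames shorter than rx_copybreak are
 * copied into a freshly allocated skb so the ring buffer can be reused at
 * once; larger frames are passed up in place and the ring entry is refilled
 * from the loop at the end.
 */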
1829static int rhine_rx(struct net_device *dev, int limit)
1830{
1831 struct rhine_private *rp = netdev_priv(dev);
1832 int count;
1833 int entry = rp->cur_rx % RX_RING_SIZE;
1834
1835 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1836 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1837
1838
1839 for (count = 0; count < limit; ++count) {
1840 struct rx_desc *desc = rp->rx_head_desc;
1841 u32 desc_status = le32_to_cpu(desc->rx_status);
1842 u32 desc_length = le32_to_cpu(desc->desc_length);
1843 int data_size = desc_status >> 16;
1844
1845 if (desc_status & DescOwn)
1846 break;
1847
1848 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1849 desc_status);
1850
1851 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1852 if ((desc_status & RxWholePkt) != RxWholePkt) {
1853 netdev_warn(dev,
1854 "Oversized Ethernet frame spanned multiple buffers, "
1855 "entry %#x length %d status %08x!\n",
1856 entry, data_size,
1857 desc_status);
1858 netdev_warn(dev,
1859 "Oversized Ethernet frame %p vs %p\n",
1860 rp->rx_head_desc,
1861 &rp->rx_ring[entry]);
1862 dev->stats.rx_length_errors++;
1863 } else if (desc_status & RxErr) {
1864
1865 netif_dbg(rp, rx_err, dev,
1866 "%s() Rx error %08x\n", __func__,
1867 desc_status);
1868 dev->stats.rx_errors++;
1869 if (desc_status & 0x0030)
1870 dev->stats.rx_length_errors++;
1871 if (desc_status & 0x0048)
1872 dev->stats.rx_fifo_errors++;
1873 if (desc_status & 0x0004)
1874 dev->stats.rx_frame_errors++;
1875 if (desc_status & 0x0002) {
1876
1877 spin_lock(&rp->lock);
1878 dev->stats.rx_crc_errors++;
1879 spin_unlock(&rp->lock);
1880 }
1881 }
1882 } else {
1883 struct sk_buff *skb = NULL;
1884
1885 int pkt_len = data_size - 4;
1886 u16 vlan_tci = 0;
1887
1888
1889
1890 if (pkt_len < rx_copybreak)
1891 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1892 if (skb) {
1893 pci_dma_sync_single_for_cpu(rp->pdev,
1894 rp->rx_skbuff_dma[entry],
1895 rp->rx_buf_sz,
1896 PCI_DMA_FROMDEVICE);
1897
1898 skb_copy_to_linear_data(skb,
1899 rp->rx_skbuff[entry]->data,
1900 pkt_len);
1901 skb_put(skb, pkt_len);
1902 pci_dma_sync_single_for_device(rp->pdev,
1903 rp->rx_skbuff_dma[entry],
1904 rp->rx_buf_sz,
1905 PCI_DMA_FROMDEVICE);
1906 } else {
1907 skb = rp->rx_skbuff[entry];
1908 if (skb == NULL) {
1909 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1910 break;
1911 }
1912 rp->rx_skbuff[entry] = NULL;
1913 skb_put(skb, pkt_len);
1914 pci_unmap_single(rp->pdev,
1915 rp->rx_skbuff_dma[entry],
1916 rp->rx_buf_sz,
1917 PCI_DMA_FROMDEVICE);
1918 }
1919
1920 if (unlikely(desc_length & DescTag))
1921 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1922
1923 skb->protocol = eth_type_trans(skb, dev);
1924
1925 if (unlikely(desc_length & DescTag))
1926 __vlan_hwaccel_put_tag(skb, vlan_tci);
1927 netif_receive_skb(skb);
1928 dev->stats.rx_bytes += pkt_len;
1929 dev->stats.rx_packets++;
1930 }
1931 entry = (++rp->cur_rx) % RX_RING_SIZE;
1932 rp->rx_head_desc = &rp->rx_ring[entry];
1933 }
1934
1935
1936 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1937 struct sk_buff *skb;
1938 entry = rp->dirty_rx % RX_RING_SIZE;
1939 if (rp->rx_skbuff[entry] == NULL) {
1940 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1941 rp->rx_skbuff[entry] = skb;
1942 if (skb == NULL)
1943 break;
1944 rp->rx_skbuff_dma[entry] =
1945 pci_map_single(rp->pdev, skb->data,
1946 rp->rx_buf_sz,
1947 PCI_DMA_FROMDEVICE);
1948 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1949 }
1950 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1951 }
1952
1953 return count;
1954}
1955
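/* Restart the transmitter after a Tx error, provided no further Tx error has
 * been signalled in the meantime: point the chip at the first unprocessed
 * descriptor and re-enable/kick the Tx engine.
 */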
static void rhine_restart_tx(struct net_device *dev)
{
1957 struct rhine_private *rp = netdev_priv(dev);
1958 void __iomem *ioaddr = rp->base;
1959 int entry = rp->dirty_tx % TX_RING_SIZE;
1960 u32 intr_status;
1961
1962
1963
1964
1965
1966 intr_status = rhine_get_events(rp);
1967
1968 if ((intr_status & IntrTxErrSummary) == 0) {
1969
1970
1971 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1972 ioaddr + TxRingPtr);
1973
1974 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1975 ioaddr + ChipCmd);
1976
1977 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1978
1979 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1980
1981 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1982 ioaddr + ChipCmd1);
1983 IOSYNC;
	} else {
1986
1987 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
1988 intr_status);
1989 }
1990
1991}
1992
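/* Workqueue handler for the rare "slow" events (link change, PCI error) that
 * rhine_napipoll() defers out of the interrupt path.
 */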
1993static void rhine_slow_event_task(struct work_struct *work)
1994{
1995 struct rhine_private *rp =
1996 container_of(work, struct rhine_private, slow_event_task);
1997 struct net_device *dev = rp->dev;
1998 u32 intr_status;
1999
2000 mutex_lock(&rp->task_lock);
2001
2002 if (!rp->task_enable)
2003 goto out_unlock;
2004
2005 intr_status = rhine_get_events(rp);
2006 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2007
2008 if (intr_status & IntrLinkChange)
2009 rhine_check_media(dev, 0);
2010
2011 if (intr_status & IntrPCIErr)
2012 netif_warn(rp, hw, dev, "PCI error\n");
2013
2014 napi_disable(&rp->napi);
2015 rhine_irq_disable(rp);
2016
2017 napi_enable(&rp->napi);
2018 napi_schedule(&rp->napi);
2019
2020out_unlock:
2021 mutex_unlock(&rp->task_lock);
2022}
2023
2024static struct net_device_stats *rhine_get_stats(struct net_device *dev)
2025{
2026 struct rhine_private *rp = netdev_priv(dev);
2027
2028 spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errors(rp);
2030 spin_unlock_bh(&rp->lock);
2031
2032 return &dev->stats;
2033}
2034
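/*
 * Program the receive filter: promiscuous mode, accept-all-multicast when the
 * address list exceeds multicast_filter_limit, per-address CAM entries on
 * VT6105M, or the classic 64-bit multicast hash on older chips.
 */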
2035static void rhine_set_rx_mode(struct net_device *dev)
2036{
2037 struct rhine_private *rp = netdev_priv(dev);
2038 void __iomem *ioaddr = rp->base;
2039 u32 mc_filter[2];
2040 u8 rx_mode = 0x0C;
2041 struct netdev_hw_addr *ha;
2042
2043 if (dev->flags & IFF_PROMISC) {
2044 rx_mode = 0x1C;
2045 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2046 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2047 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2048 (dev->flags & IFF_ALLMULTI)) {
2049
2050 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2051 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2052 } else if (rp->pdev->revision >= VT6105M) {
2053 int i = 0;
2054 u32 mCAMmask = 0;
2055 netdev_for_each_mc_addr(ha, dev) {
2056 if (i == MCAM_SIZE)
2057 break;
2058 rhine_set_cam(ioaddr, i, ha->addr);
2059 mCAMmask |= 1 << i;
2060 i++;
2061 }
2062 rhine_set_cam_mask(ioaddr, mCAMmask);
2063 } else {
2064 memset(mc_filter, 0, sizeof(mc_filter));
2065 netdev_for_each_mc_addr(ha, dev) {
2066 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2067
2068 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2069 }
2070 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2071 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2072 }
2073
2074 if (rp->pdev->revision >= VT6105M) {
2075 if (dev->flags & IFF_PROMISC)
2076 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2077 else
2078 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2079 }
2080 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2081}
2082
2083static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2084{
2085 struct rhine_private *rp = netdev_priv(dev);
2086
2087 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2088 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2089 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2090}
2091
2092static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2093{
2094 struct rhine_private *rp = netdev_priv(dev);
2095 int rc;
2096
2097 mutex_lock(&rp->task_lock);
2098 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2099 mutex_unlock(&rp->task_lock);
2100
2101 return rc;
2102}
2103
2104static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2105{
2106 struct rhine_private *rp = netdev_priv(dev);
2107 int rc;
2108
2109 mutex_lock(&rp->task_lock);
2110 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2111 rhine_set_carrier(&rp->mii_if);
2112 mutex_unlock(&rp->task_lock);
2113
2114 return rc;
2115}
2116
2117static int netdev_nway_reset(struct net_device *dev)
2118{
2119 struct rhine_private *rp = netdev_priv(dev);
2120
2121 return mii_nway_restart(&rp->mii_if);
2122}
2123
2124static u32 netdev_get_link(struct net_device *dev)
2125{
2126 struct rhine_private *rp = netdev_priv(dev);
2127
2128 return mii_link_ok(&rp->mii_if);
2129}
2130
2131static u32 netdev_get_msglevel(struct net_device *dev)
2132{
2133 struct rhine_private *rp = netdev_priv(dev);
2134
2135 return rp->msg_enable;
2136}
2137
2138static void netdev_set_msglevel(struct net_device *dev, u32 value)
2139{
2140 struct rhine_private *rp = netdev_priv(dev);
2141
2142 rp->msg_enable = value;
2143}
2144
2145static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2146{
2147 struct rhine_private *rp = netdev_priv(dev);
2148
2149 if (!(rp->quirks & rqWOL))
2150 return;
2151
2152 spin_lock_irq(&rp->lock);
2153 wol->supported = WAKE_PHY | WAKE_MAGIC |
2154 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2155 wol->wolopts = rp->wolopts;
2156 spin_unlock_irq(&rp->lock);
2157}
2158
2159static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2160{
2161 struct rhine_private *rp = netdev_priv(dev);
2162 u32 support = WAKE_PHY | WAKE_MAGIC |
2163 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2164
2165 if (!(rp->quirks & rqWOL))
2166 return -EINVAL;
2167
2168 if (wol->wolopts & ~support)
2169 return -EINVAL;
2170
2171 spin_lock_irq(&rp->lock);
2172 rp->wolopts = wol->wolopts;
2173 spin_unlock_irq(&rp->lock);
2174
2175 return 0;
2176}
2177
2178static const struct ethtool_ops netdev_ethtool_ops = {
2179 .get_drvinfo = netdev_get_drvinfo,
2180 .get_settings = netdev_get_settings,
2181 .set_settings = netdev_set_settings,
2182 .nway_reset = netdev_nway_reset,
2183 .get_link = netdev_get_link,
2184 .get_msglevel = netdev_get_msglevel,
2185 .set_msglevel = netdev_set_msglevel,
2186 .get_wol = rhine_get_wol,
2187 .set_wol = rhine_set_wol,
2188};
2189
2190static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2191{
2192 struct rhine_private *rp = netdev_priv(dev);
2193 int rc;
2194
2195 if (!netif_running(dev))
2196 return -EINVAL;
2197
2198 mutex_lock(&rp->task_lock);
2199 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2200 rhine_set_carrier(&rp->mii_if);
2201 mutex_unlock(&rp->task_lock);
2202
2203 return rc;
2204}
2205
2206static int rhine_close(struct net_device *dev)
2207{
2208 struct rhine_private *rp = netdev_priv(dev);
2209 void __iomem *ioaddr = rp->base;
2210
2211 rhine_task_disable(rp);
2212 napi_disable(&rp->napi);
2213 netif_stop_queue(dev);
2214
2215 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2216 ioread16(ioaddr + ChipCmd));
2217
2218
2219 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2220
2221 rhine_irq_disable(rp);
2222
2223
2224 iowrite16(CmdStop, ioaddr + ChipCmd);
2225
2226 free_irq(rp->pdev->irq, dev);
2227 free_rbufs(dev);
2228 free_tbufs(dev);
2229 free_ring(dev);
2230
2231 return 0;
2232}
2233
2234
2235static void __devexit rhine_remove_one(struct pci_dev *pdev)
2236{
2237 struct net_device *dev = pci_get_drvdata(pdev);
2238 struct rhine_private *rp = netdev_priv(dev);
2239
2240 unregister_netdev(dev);
2241
2242 pci_iounmap(pdev, rp->base);
2243 pci_release_regions(pdev);
2244
2245 free_netdev(dev);
2246 pci_disable_device(pdev);
2247 pci_set_drvdata(pdev, NULL);
2248}
2249
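/*
 * Prepare the chip for system shutdown: program the requested Wake-On-LAN
 * events and, at power-off, put the device into PCI D3 with wake-up enabled
 * unless avoid_D3 is set.
 */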
2250static void rhine_shutdown (struct pci_dev *pdev)
2251{
2252 struct net_device *dev = pci_get_drvdata(pdev);
2253 struct rhine_private *rp = netdev_priv(dev);
2254 void __iomem *ioaddr = rp->base;
2255
2256 if (!(rp->quirks & rqWOL))
2257 return;
2258
2259 rhine_power_init(dev);
2260
2261
2262 if (rp->quirks & rq6patterns)
2263 iowrite8(0x04, ioaddr + WOLcgClr);
2264
2265 spin_lock(&rp->lock);
2266
2267 if (rp->wolopts & WAKE_MAGIC) {
2268 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2269
2270
2271
2272
2273 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2274 }
2275
2276 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2277 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2278
2279 if (rp->wolopts & WAKE_PHY)
2280 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2281
2282 if (rp->wolopts & WAKE_UCAST)
2283 iowrite8(WOLucast, ioaddr + WOLcrSet);
2284
2285 if (rp->wolopts) {
2286
2287 iowrite8(0x01, ioaddr + PwcfgSet);
2288 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2289 }
2290
2291 spin_unlock(&rp->lock);
2292
2293 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2294 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2295
2296 pci_wake_from_d3(pdev, true);
2297 pci_set_power_state(pdev, PCI_D3hot);
2298 }
2299}
2300
2301#ifdef CONFIG_PM_SLEEP
2302static int rhine_suspend(struct device *device)
2303{
2304 struct pci_dev *pdev = to_pci_dev(device);
2305 struct net_device *dev = pci_get_drvdata(pdev);
2306 struct rhine_private *rp = netdev_priv(dev);
2307
2308 if (!netif_running(dev))
2309 return 0;
2310
2311 rhine_task_disable(rp);
2312 rhine_irq_disable(rp);
2313 napi_disable(&rp->napi);
2314
2315 netif_device_detach(dev);
2316
2317 rhine_shutdown(pdev);
2318
2319 return 0;
2320}
2321
2322static int rhine_resume(struct device *device)
2323{
2324 struct pci_dev *pdev = to_pci_dev(device);
2325 struct net_device *dev = pci_get_drvdata(pdev);
2326 struct rhine_private *rp = netdev_priv(dev);
2327
2328 if (!netif_running(dev))
2329 return 0;
2330
2331#ifdef USE_MMIO
2332 enable_mmio(rp->pioaddr, rp->quirks);
2333#endif
2334 rhine_power_init(dev);
2335 free_tbufs(dev);
2336 free_rbufs(dev);
2337 alloc_tbufs(dev);
2338 alloc_rbufs(dev);
2339 rhine_task_enable(rp);
2340 spin_lock_bh(&rp->lock);
2341 init_registers(dev);
2342 spin_unlock_bh(&rp->lock);
2343
2344 netif_device_attach(dev);
2345
2346 return 0;
2347}
2348
2349static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2350#define RHINE_PM_OPS (&rhine_pm_ops)
2351
2352#else
2353
2354#define RHINE_PM_OPS NULL
2355
2356#endif
2357
2358static struct pci_driver rhine_driver = {
2359 .name = DRV_NAME,
2360 .id_table = rhine_pci_tbl,
2361 .probe = rhine_init_one,
2362 .remove = __devexit_p(rhine_remove_one),
2363 .shutdown = rhine_shutdown,
2364 .driver.pm = RHINE_PM_OPS,
2365};
2366
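/* Boards whose BIOS is known to mishandle D3: avoid_D3 is forced on when one
 * of these matches (see rhine_init()).
 */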
2367static struct dmi_system_id __initdata rhine_dmi_table[] = {
2368 {
2369 .ident = "EPIA-M",
2370 .matches = {
2371 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2372 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2373 },
2374 },
2375 {
2376 .ident = "KV7",
2377 .matches = {
2378 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2379 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2380 },
2381 },
2382 { NULL }
2383};
2384
2385static int __init rhine_init(void)
2386{
2387
2388#ifdef MODULE
2389 pr_info("%s\n", version);
2390#endif
2391 if (dmi_check_system(rhine_dmi_table)) {
2392
2393 avoid_D3 = true;
2394 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2395 }
2396 else if (avoid_D3)
2397 pr_info("avoid_D3 set\n");
2398
2399 return pci_register_driver(&rhine_driver);
2400}
2401
2402
2403static void __exit rhine_cleanup(void)
2404{
2405 pci_unregister_driver(&rhine_driver);
2406}
2407
2408
2409module_init(rhine_init);
2410module_exit(rhine_cleanup);
2411