/*
 * via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips.
 *
 * Written 1998-2001 by Donald Becker.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME "via-rhine"
#define DRV_VERSION "1.5.1"
#define DRV_RELDATE "2010-10-09"

#include <linux/types.h>

/*
 * A few user-configurable values.
 * These may be modified when a driver module is loaded.
 */

/* Debug message bitmap, combined with RHINE_MSG_DEFAULT via netif_msg_init(). */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/*
 * Work-around for broken BIOSes: they are unable to get the chip
 * back out of power state D3, so avoid switching to D3.
 */
static bool avoid_D3;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The Rhine has a 64 element 8390-like hash table.
 */
static const int multicast_filter_limit = 32;

/*
 * Operational parameters that are set at compile time.
 *
 * Keep the ring sizes a power of two for compile efficiency.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority; there are no ill effects from too-large
 * receive rings.
 */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10	/* Limit ring entries actually used. */
#define RX_RING_SIZE 64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)

#define PKT_BUF_SZ 1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/*
 * This driver was written to use PCI memory space. Some early versions
 * of the Rhine only work correctly with I/O-space accesses, so MMIO use
 * is made configurable.
 */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE 32
#define VCAM_SIZE 32

/* VIA Rhine chip revision codes, as reported in pdev->revision. */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,
};

/* Per-revision quirk flags, set up in rhine_init_one(). */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-on-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* Rhine-I quirks (alignment, reset, MMIO enable) */
};

/*
 * Flush posted PCI writes by reading a harmless register
 * (the low byte of the station address).
 */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

302enum backoff_bits {
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
305};
306
307
308enum tcr_bits {
309 TCR_PQEN=0x01,
310 TCR_LB0=0x02,
311 TCR_LB1=0x04,
312 TCR_OFSET=0x08,
313 TCR_RTGOPT=0x10,
314 TCR_RTFT0=0x20,
315 TCR_RTFT1=0x40,
316 TCR_RTSF=0x80,
317};
318
319
320enum camcon_bits {
321 CAMC_CAMEN=0x01,
322 CAMC_VCAMSL=0x02,
323 CAMC_CAMWR=0x04,
324 CAMC_CAMRD=0x08,
325};
326
327
328enum bcr1_bits {
329 BCR1_POT0=0x01,
330 BCR1_POT1=0x02,
331 BCR1_POT2=0x04,
332 BCR1_CTFT0=0x08,
333 BCR1_CTFT1=0x10,
334 BCR1_CTSF=0x20,
335 BCR1_TXQNOBK=0x40,
336 BCR1_VIDFR=0x80,
337 BCR1_MED0=0x40,
338 BCR1_MED1=0x80,
339};
340
341#ifdef USE_MMIO
342
343static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 0
346};
347#endif
348
349
350enum intr_status_bits {
351 IntrRxDone = 0x0001,
352 IntrTxDone = 0x0002,
353 IntrRxErr = 0x0004,
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
356 IntrPCIErr = 0x0040,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000,
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
369 IntrTxUnderrun,
370};
371
372
373enum wol_bits {
374 WOLucast = 0x10,
375 WOLmagic = 0x20,
376 WOLbmcast = 0x30,
377 WOLlnkon = 0x40,
378 WOLlnkoff = 0x80,
379};
380
381
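/*
 * Rx and Tx descriptor layout as seen by the chip; all fields are
 * little-endian and the rings live in DMA-coherent memory.
 */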
382struct rx_desc {
383 __le32 rx_status;
384 __le32 desc_length;
385 __le32 addr;
386 __le32 next_desc;
387};
388struct tx_desc {
389 __le32 tx_status;
390 __le32 desc_length;
391 __le32 addr;
392 __le32 next_desc;
393};
394
395
396#define TXDESC 0x00e08000
397
398enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400};
401
402
403enum desc_status_bits {
404 DescOwn=0x80000000
405};
406
407
408enum desc_length_bits {
409 DescTag=0x00010000
410};
411
412
413enum chip_cmd_bits {
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418};
419
420struct rhine_stats {
421 u64 packets;
422 u64 bytes;
423 struct u64_stats_sync syncp;
424};
425
426struct rhine_private {
427
428 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
429
430
431 struct rx_desc *rx_ring;
432 struct tx_desc *tx_ring;
433 dma_addr_t rx_ring_dma;
434 dma_addr_t tx_ring_dma;
435
436
437 struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
439
440
441 struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
443
444
445 unsigned char *tx_buf[TX_RING_SIZE];
446 unsigned char *tx_bufs;
447 dma_addr_t tx_bufs_dma;
448
449 struct pci_dev *pdev;
450 long pioaddr;
451 struct net_device *dev;
452 struct napi_struct napi;
453 spinlock_t lock;
454 struct mutex task_lock;
455 bool task_enable;
456 struct work_struct slow_event_task;
457 struct work_struct reset_task;
458
459 u32 msg_enable;
460
461
462 u32 quirks;
463 struct rx_desc *rx_head_desc;
464 unsigned int cur_rx, dirty_rx;
465 unsigned int cur_tx, dirty_tx;
466 unsigned int rx_buf_sz;
467 struct rhine_stats rx_stats;
468 struct rhine_stats tx_stats;
469 u8 wolopts;
470
471 u8 tx_thresh, rx_thresh;
472
473 struct mii_if_info mii_if;
474 void __iomem *base;
475};
476
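/* Read-modify-write helpers for 8/16/32-bit device registers. */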
477#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
480
481#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
482#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
483#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
484
485#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
488
489#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
492
493
494static int mdio_read(struct net_device *dev, int phy_id, int location);
495static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496static int rhine_open(struct net_device *dev);
497static void rhine_reset_task(struct work_struct *work);
498static void rhine_slow_event_task(struct work_struct *work);
499static void rhine_tx_timeout(struct net_device *dev);
500static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 struct net_device *dev);
502static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503static void rhine_tx(struct net_device *dev);
504static int rhine_rx(struct net_device *dev, int limit);
505static void rhine_set_rx_mode(struct net_device *dev);
506static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 struct rtnl_link_stats64 *stats);
508static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509static const struct ethtool_ops netdev_ethtool_ops;
510static int rhine_close(struct net_device *dev);
511static int rhine_vlan_rx_add_vid(struct net_device *dev,
512 __be16 proto, u16 vid);
513static int rhine_vlan_rx_kill_vid(struct net_device *dev,
514 __be16 proto, u16 vid);
515static void rhine_restart_tx(struct net_device *dev);
516
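/*
 * Poll a register until the masked bits go low (or high), for at most
 * 1024 * 10us; log via netif_dbg() if it takes more than 64 iterations.
 */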
517static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
518{
519 void __iomem *ioaddr = rp->base;
520 int i;
521
522 for (i = 0; i < 1024; i++) {
523 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
524
525 if (low ^ has_mask_bits)
526 break;
527 udelay(10);
528 }
529 if (i > 64) {
530 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
531 "count: %04d\n", low ? "low" : "high", reg, mask, i);
532 }
533}
534
535static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
536{
537 rhine_wait_bit(rp, reg, mask, false);
538}
539
540static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
541{
542 rhine_wait_bit(rp, reg, mask, true);
543}
544
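/*
 * Collect interrupt events. On chips with the status write-back race
 * quirk, IntrStatus2 supplies the high bits (e.g. IntrTxDescRace).
 */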
545static u32 rhine_get_events(struct rhine_private *rp)
546{
547 void __iomem *ioaddr = rp->base;
548 u32 intr_status;
549
550 intr_status = ioread16(ioaddr + IntrStatus);
551
552 if (rp->quirks & rqStatusWBRace)
553 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
554 return intr_status;
555}
556
557static void rhine_ack_events(struct rhine_private *rp, u32 mask)
558{
559 void __iomem *ioaddr = rp->base;
560
561 if (rp->quirks & rqStatusWBRace)
562 iowrite8(mask >> 16, ioaddr + IntrStatus2);
563 iowrite16(mask, ioaddr + IntrStatus);
564 mmiowb();
565}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
571static void rhine_power_init(struct net_device *dev)
572{
573 struct rhine_private *rp = netdev_priv(dev);
574 void __iomem *ioaddr = rp->base;
575 u16 wolstat;
576
577 if (rp->quirks & rqWOL) {
578
579 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
580
581
582 iowrite8(0x80, ioaddr + WOLcgClr);
583
584
585 iowrite8(0xFF, ioaddr + WOLcrClr);
586
587 if (rp->quirks & rq6patterns)
588 iowrite8(0x03, ioaddr + WOLcrClr1);
589
590
591 wolstat = ioread8(ioaddr + PwrcsrSet);
592 if (rp->quirks & rq6patterns)
593 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
594
595
596 iowrite8(0xFF, ioaddr + PwrcsrClr);
597 if (rp->quirks & rq6patterns)
598 iowrite8(0x03, ioaddr + PwrcsrClr1);
599
600 if (wolstat) {
601 char *reason;
602 switch (wolstat) {
603 case WOLmagic:
604 reason = "Magic packet";
605 break;
606 case WOLlnkon:
607 reason = "Link went up";
608 break;
609 case WOLlnkoff:
610 reason = "Link went down";
611 break;
612 case WOLucast:
613 reason = "Unicast packet";
614 break;
615 case WOLbmcast:
616 reason = "Multicast/broadcast packet";
617 break;
618 default:
619 reason = "Unknown";
620 }
621 netdev_info(dev, "Woke system up. Reason: %s\n",
622 reason);
623 }
624 }
625}
626
627static void rhine_chip_reset(struct net_device *dev)
628{
629 struct rhine_private *rp = netdev_priv(dev);
630 void __iomem *ioaddr = rp->base;
631 u8 cmd1;
632
633 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
634 IOSYNC;
635
636 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
637 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
638
639
640 if (rp->quirks & rqForceReset)
641 iowrite8(0x40, ioaddr + MiscCmd);
642
643
644 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
645 }
646
647 cmd1 = ioread8(ioaddr + ChipCmd1);
648 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
649 "failed" : "succeeded");
650}
651
652#ifdef USE_MMIO
653static void enable_mmio(long pioaddr, u32 quirks)
654{
655 int n;
656 if (quirks & rqRhineI) {
657
658 n = inb(pioaddr + ConfigA) | 0x20;
659 outb(n, pioaddr + ConfigA);
660 } else {
661 n = inb(pioaddr + ConfigD) | 0x80;
662 outb(n, pioaddr + ConfigD);
663 }
664}
665#endif

/*
 * Trigger an EEPROM autoload: reloads the station address and several
 * configuration registers from EEPROM contents.
 */
671static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
672{
673 struct rhine_private *rp = netdev_priv(dev);
674 void __iomem *ioaddr = rp->base;
675 int i;
676
677 outb(0x20, pioaddr + MACRegEEcsr);
678 for (i = 0; i < 1024; i++) {
679 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
680 break;
681 }
682 if (i > 512)
683 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
684
685#ifdef USE_MMIO

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so the MMIO enable
	 * bit must be set again afterwards.
	 */
691 enable_mmio(pioaddr, rp->quirks);
692#endif
693
694
695 if (rp->quirks & rqWOL)
696 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
697
698}
699
700#ifdef CONFIG_NET_POLL_CONTROLLER
701static void rhine_poll(struct net_device *dev)
702{
703 struct rhine_private *rp = netdev_priv(dev);
704 const int irq = rp->pdev->irq;
705
706 disable_irq(irq);
707 rhine_interrupt(irq, dev);
708 enable_irq(irq);
709}
710#endif
711
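/* After a Tx underrun, raise the Tx FIFO threshold one step (capped at 0xe0). */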
712static void rhine_kick_tx_threshold(struct rhine_private *rp)
713{
714 if (rp->tx_thresh < 0xe0) {
715 void __iomem *ioaddr = rp->base;
716
717 rp->tx_thresh += 0x20;
718 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
719 }
720}
721
722static void rhine_tx_err(struct rhine_private *rp, u32 status)
723{
724 struct net_device *dev = rp->dev;
725
726 if (status & IntrTxAborted) {
727 netif_info(rp, tx_err, dev,
728 "Abort %08x, frame dropped\n", status);
729 }
730
731 if (status & IntrTxUnderrun) {
732 rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
735 }
736
737 if (status & IntrTxDescRace)
738 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
739
740 if ((status & IntrTxError) &&
741 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
742 rhine_kick_tx_threshold(rp);
743 netif_info(rp, tx_err, dev, "Unspecified error. "
744 "Tx threshold now %02x\n", rp->tx_thresh);
745 }
746
747 rhine_restart_tx(dev);
748}
749
750static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
751{
752 void __iomem *ioaddr = rp->base;
753 struct net_device_stats *stats = &rp->dev->stats;
754
755 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
756 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames.
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
764 iowrite32(0, ioaddr + RxMissed);
765 ioread16(ioaddr + RxCRCErrs);
766 ioread16(ioaddr + RxMissed);
767}
768
769#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
770 IntrRxErr | \
771 IntrRxEmpty | \
772 IntrRxOverflow | \
773 IntrRxDropped | \
774 IntrRxNoBuf | \
775 IntrRxWakeUp)
776
777#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
778 IntrTxAborted | \
779 IntrTxUnderrun | \
780 IntrTxDescRace)
781#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
782
783#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
784 RHINE_EVENT_NAPI_TX | \
785 IntrStatsMax)
786#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
787#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
788
789static int rhine_napipoll(struct napi_struct *napi, int budget)
790{
791 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
792 struct net_device *dev = rp->dev;
793 void __iomem *ioaddr = rp->base;
794 u16 enable_mask = RHINE_EVENT & 0xffff;
795 int work_done = 0;
796 u32 status;
797
798 status = rhine_get_events(rp);
799 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
800
801 if (status & RHINE_EVENT_NAPI_RX)
802 work_done += rhine_rx(dev, budget);
803
804 if (status & RHINE_EVENT_NAPI_TX) {
805 if (status & RHINE_EVENT_NAPI_TX_ERR) {
806
807 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
808 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
809 netif_warn(rp, tx_err, dev, "Tx still on\n");
810 }
811
812 rhine_tx(dev);
813
814 if (status & RHINE_EVENT_NAPI_TX_ERR)
815 rhine_tx_err(rp, status);
816 }
817
818 if (status & IntrStatsMax) {
819 spin_lock(&rp->lock);
820 rhine_update_rx_crc_and_missed_errord(rp);
821 spin_unlock(&rp->lock);
822 }
823
824 if (status & RHINE_EVENT_SLOW) {
825 enable_mask &= ~RHINE_EVENT_SLOW;
826 schedule_work(&rp->slow_event_task);
827 }
828
829 if (work_done < budget) {
830 napi_complete(napi);
831 iowrite16(enable_mask, ioaddr + IntrEnable);
832 mmiowb();
833 }
834 return work_done;
835}
836
837static void rhine_hw_init(struct net_device *dev, long pioaddr)
838{
839 struct rhine_private *rp = netdev_priv(dev);
840
841
842 rhine_chip_reset(dev);
843
844
845 if (rp->quirks & rqRhineI)
846 msleep(5);
847
848
849 rhine_reload_eeprom(pioaddr, dev);
850}
851
852static const struct net_device_ops rhine_netdev_ops = {
853 .ndo_open = rhine_open,
854 .ndo_stop = rhine_close,
855 .ndo_start_xmit = rhine_start_tx,
856 .ndo_get_stats64 = rhine_get_stats64,
857 .ndo_set_rx_mode = rhine_set_rx_mode,
858 .ndo_change_mtu = eth_change_mtu,
859 .ndo_validate_addr = eth_validate_addr,
860 .ndo_set_mac_address = eth_mac_addr,
861 .ndo_do_ioctl = netdev_ioctl,
862 .ndo_tx_timeout = rhine_tx_timeout,
863 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
864 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
865#ifdef CONFIG_NET_POLL_CONTROLLER
866 .ndo_poll_controller = rhine_poll,
867#endif
868};
869
870static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
871{
872 struct net_device *dev;
873 struct rhine_private *rp;
874 int i, rc;
875 u32 quirks;
876 long pioaddr;
877 long memaddr;
878 void __iomem *ioaddr;
879 int io_size, phy_id;
880 const char *name;
881#ifdef USE_MMIO
882 int bar = 1;
883#else
884 int bar = 0;
885#endif
886
887
888#ifndef MODULE
889 pr_info_once("%s\n", version);
890#endif
891
892 io_size = 256;
893 phy_id = 0;
894 quirks = 0;
895 name = "Rhine";
896 if (pdev->revision < VTunknown0) {
897 quirks = rqRhineI;
898 io_size = 128;
899 }
900 else if (pdev->revision >= VT6102) {
901 quirks = rqWOL | rqForceReset;
902 if (pdev->revision < VT6105) {
903 name = "Rhine II";
904 quirks |= rqStatusWBRace;
905 }
906 else {
907 phy_id = 1;
908 if (pdev->revision >= VT6105_B0)
909 quirks |= rq6patterns;
910 if (pdev->revision < VT6105M)
911 name = "Rhine III";
912 else
913 name = "Rhine III (Management Adapter)";
914 }
915 }
916
917 rc = pci_enable_device(pdev);
918 if (rc)
919 goto err_out;
920
921
922 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
923 if (rc) {
924 dev_err(&pdev->dev,
925 "32-bit PCI DMA addresses not supported by the card!?\n");
926 goto err_out;
927 }
928
929
930 if ((pci_resource_len(pdev, 0) < io_size) ||
931 (pci_resource_len(pdev, 1) < io_size)) {
932 rc = -EIO;
933 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
934 goto err_out;
935 }
936
937 pioaddr = pci_resource_start(pdev, 0);
938 memaddr = pci_resource_start(pdev, 1);
939
940 pci_set_master(pdev);
941
942 dev = alloc_etherdev(sizeof(struct rhine_private));
943 if (!dev) {
944 rc = -ENOMEM;
945 goto err_out;
946 }
947 SET_NETDEV_DEV(dev, &pdev->dev);
948
949 rp = netdev_priv(dev);
950 rp->dev = dev;
951 rp->quirks = quirks;
952 rp->pioaddr = pioaddr;
953 rp->pdev = pdev;
954 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
955
956 rc = pci_request_regions(pdev, DRV_NAME);
957 if (rc)
958 goto err_out_free_netdev;
959
960 ioaddr = pci_iomap(pdev, bar, io_size);
961 if (!ioaddr) {
962 rc = -EIO;
963 dev_err(&pdev->dev,
964 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
965 pci_name(pdev), io_size, memaddr);
966 goto err_out_free_res;
967 }
968
969#ifdef USE_MMIO
970 enable_mmio(pioaddr, quirks);
971
972
973 i = 0;
974 while (mmio_verify_registers[i]) {
975 int reg = mmio_verify_registers[i++];
976 unsigned char a = inb(pioaddr+reg);
977 unsigned char b = readb(ioaddr+reg);
978 if (a != b) {
979 rc = -EIO;
980 dev_err(&pdev->dev,
981 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
982 reg, a, b);
983 goto err_out_unmap;
984 }
985 }
986#endif
987
988 rp->base = ioaddr;
989
990
991 rhine_power_init(dev);
992 rhine_hw_init(dev, pioaddr);
993
994 for (i = 0; i < 6; i++)
995 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
996
997 if (!is_valid_ether_addr(dev->dev_addr)) {
998
999 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
1000 eth_hw_addr_random(dev);
1001 netdev_info(dev, "Using random MAC address: %pM\n",
1002 dev->dev_addr);
1003 }
1004
1005
1006 if (!phy_id)
1007 phy_id = ioread8(ioaddr + 0x6C);
1008
1009 spin_lock_init(&rp->lock);
1010 mutex_init(&rp->task_lock);
1011 INIT_WORK(&rp->reset_task, rhine_reset_task);
1012 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1013
1014 rp->mii_if.dev = dev;
1015 rp->mii_if.mdio_read = mdio_read;
1016 rp->mii_if.mdio_write = mdio_write;
1017 rp->mii_if.phy_id_mask = 0x1f;
1018 rp->mii_if.reg_num_mask = 0x1f;
1019
1020
1021 dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
1023 dev->watchdog_timeo = TX_TIMEOUT;
1024
1025 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1026
1027 if (rp->quirks & rqRhineI)
1028 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1029
1030 if (pdev->revision >= VT6105M)
1031 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1032 NETIF_F_HW_VLAN_CTAG_RX |
1033 NETIF_F_HW_VLAN_CTAG_FILTER;
1034
1035
1036 rc = register_netdev(dev);
1037 if (rc)
1038 goto err_out_unmap;
1039
1040 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1041 name,
1042#ifdef USE_MMIO
1043 memaddr,
1044#else
1045 (long)ioaddr,
1046#endif
1047 dev->dev_addr, pdev->irq);
1048
1049 pci_set_drvdata(pdev, dev);
1050
1051 {
1052 u16 mii_cmd;
1053 int mii_status = mdio_read(dev, phy_id, 1);
1054 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1055 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1056 if (mii_status != 0xffff && mii_status != 0x0000) {
1057 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1058 netdev_info(dev,
1059 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1060 phy_id,
1061 mii_status, rp->mii_if.advertising,
1062 mdio_read(dev, phy_id, 5));
1063
1064
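			/* Reflect the current PHY link state in the net device. */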
1065 if (mii_status & BMSR_LSTATUS)
1066 netif_carrier_on(dev);
1067 else
1068 netif_carrier_off(dev);
1069
1070 }
1071 }
1072 rp->mii_if.phy_id = phy_id;
1073 if (avoid_D3)
1074 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1075
1076 return 0;
1077
1078err_out_unmap:
1079 pci_iounmap(pdev, ioaddr);
1080err_out_free_res:
1081 pci_release_regions(pdev);
1082err_out_free_netdev:
1083 free_netdev(dev);
1084err_out:
1085 return rc;
1086}
1087
1088static int alloc_ring(struct net_device* dev)
1089{
1090 struct rhine_private *rp = netdev_priv(dev);
1091 void *ring;
1092 dma_addr_t ring_dma;
1093
1094 ring = pci_alloc_consistent(rp->pdev,
1095 RX_RING_SIZE * sizeof(struct rx_desc) +
1096 TX_RING_SIZE * sizeof(struct tx_desc),
1097 &ring_dma);
1098 if (!ring) {
1099 netdev_err(dev, "Could not allocate DMA memory\n");
1100 return -ENOMEM;
1101 }
1102 if (rp->quirks & rqRhineI) {
1103 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1104 PKT_BUF_SZ * TX_RING_SIZE,
1105 &rp->tx_bufs_dma);
1106 if (rp->tx_bufs == NULL) {
1107 pci_free_consistent(rp->pdev,
1108 RX_RING_SIZE * sizeof(struct rx_desc) +
1109 TX_RING_SIZE * sizeof(struct tx_desc),
1110 ring, ring_dma);
1111 return -ENOMEM;
1112 }
1113 }
1114
1115 rp->rx_ring = ring;
1116 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1117 rp->rx_ring_dma = ring_dma;
1118 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1119
1120 return 0;
1121}
1122
1123static void free_ring(struct net_device* dev)
1124{
1125 struct rhine_private *rp = netdev_priv(dev);
1126
1127 pci_free_consistent(rp->pdev,
1128 RX_RING_SIZE * sizeof(struct rx_desc) +
1129 TX_RING_SIZE * sizeof(struct tx_desc),
1130 rp->rx_ring, rp->rx_ring_dma);
1131 rp->tx_ring = NULL;
1132
1133 if (rp->tx_bufs)
1134 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1135 rp->tx_bufs, rp->tx_bufs_dma);
1136
1137 rp->tx_bufs = NULL;
1138
1139}
1140
1141static void alloc_rbufs(struct net_device *dev)
1142{
1143 struct rhine_private *rp = netdev_priv(dev);
1144 dma_addr_t next;
1145 int i;
1146
1147 rp->dirty_rx = rp->cur_rx = 0;
1148
1149 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1150 rp->rx_head_desc = &rp->rx_ring[0];
1151 next = rp->rx_ring_dma;
1152
1153
1154 for (i = 0; i < RX_RING_SIZE; i++) {
1155 rp->rx_ring[i].rx_status = 0;
1156 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1157 next += sizeof(struct rx_desc);
1158 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1159 rp->rx_skbuff[i] = NULL;
1160 }
1161
1162 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1163
1164
1165 for (i = 0; i < RX_RING_SIZE; i++) {
1166 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1167 rp->rx_skbuff[i] = skb;
1168 if (skb == NULL)
1169 break;
1170
1171 rp->rx_skbuff_dma[i] =
1172 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1173 PCI_DMA_FROMDEVICE);
1174
1175 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1176 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1177 }
1178 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1179}
1180
1181static void free_rbufs(struct net_device* dev)
1182{
1183 struct rhine_private *rp = netdev_priv(dev);
1184 int i;
1185
1186
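	/* Free all the skbuffs in the Rx queue. */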
1187 for (i = 0; i < RX_RING_SIZE; i++) {
1188 rp->rx_ring[i].rx_status = 0;
1189 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1190 if (rp->rx_skbuff[i]) {
1191 pci_unmap_single(rp->pdev,
1192 rp->rx_skbuff_dma[i],
1193 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1194 dev_kfree_skb(rp->rx_skbuff[i]);
1195 }
1196 rp->rx_skbuff[i] = NULL;
1197 }
1198}
1199
1200static void alloc_tbufs(struct net_device* dev)
1201{
1202 struct rhine_private *rp = netdev_priv(dev);
1203 dma_addr_t next;
1204 int i;
1205
1206 rp->dirty_tx = rp->cur_tx = 0;
1207 next = rp->tx_ring_dma;
1208 for (i = 0; i < TX_RING_SIZE; i++) {
1209 rp->tx_skbuff[i] = NULL;
1210 rp->tx_ring[i].tx_status = 0;
1211 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1212 next += sizeof(struct tx_desc);
1213 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1214 if (rp->quirks & rqRhineI)
1215 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1216 }
1217 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1218
1219}
1220
1221static void free_tbufs(struct net_device* dev)
1222{
1223 struct rhine_private *rp = netdev_priv(dev);
1224 int i;
1225
1226 for (i = 0; i < TX_RING_SIZE; i++) {
1227 rp->tx_ring[i].tx_status = 0;
1228 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1229 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1230 if (rp->tx_skbuff[i]) {
1231 if (rp->tx_skbuff_dma[i]) {
1232 pci_unmap_single(rp->pdev,
1233 rp->tx_skbuff_dma[i],
1234 rp->tx_skbuff[i]->len,
1235 PCI_DMA_TODEVICE);
1236 }
1237 dev_kfree_skb(rp->tx_skbuff[i]);
1238 }
1239 rp->tx_skbuff[i] = NULL;
1240 rp->tx_buf[i] = NULL;
1241 }
1242}
1243
1244static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1245{
1246 struct rhine_private *rp = netdev_priv(dev);
1247 void __iomem *ioaddr = rp->base;
1248
1249 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1250
1251 if (rp->mii_if.full_duplex)
1252 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1253 ioaddr + ChipCmd1);
1254 else
1255 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1256 ioaddr + ChipCmd1);
1257
1258 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1259 rp->mii_if.force_media, netif_carrier_ok(dev));
1260}
1261
1262
1263static void rhine_set_carrier(struct mii_if_info *mii)
1264{
1265 struct net_device *dev = mii->dev;
1266 struct rhine_private *rp = netdev_priv(dev);
1267
1268 if (mii->force_media) {
1269
1270 if (!netif_carrier_ok(dev))
1271 netif_carrier_on(dev);
1272 } else
1273 rhine_check_media(dev, 0);
1274
1275 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1276 mii->force_media, netif_carrier_ok(dev));
1277}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
1287static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1288{
1289 int i;
1290
1291 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1292 wmb();
1293
1294
1295 idx &= (MCAM_SIZE - 1);
1296
1297 iowrite8((u8) idx, ioaddr + CamAddr);
1298
1299 for (i = 0; i < 6; i++, addr++)
1300 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1301 udelay(10);
1302 wmb();
1303
1304 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1305 udelay(10);
1306
1307 iowrite8(0, ioaddr + CamCon);
1308}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
1318static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1319{
1320 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1321 wmb();
1322
1323
1324 idx &= (VCAM_SIZE - 1);
1325
1326 iowrite8((u8) idx, ioaddr + CamAddr);
1327
1328 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1329 udelay(10);
1330 wmb();
1331
1332 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1333 udelay(10);
1334
1335 iowrite8(0, ioaddr + CamCon);
1336}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
1345static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1346{
1347 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1348 wmb();
1349
1350
1351 iowrite32(mask, ioaddr + CamMask);
1352
1353
1354 iowrite8(0, ioaddr + CamCon);
1355}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
1364static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1365{
1366 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1367 wmb();
1368
1369
1370 iowrite32(mask, ioaddr + CamMask);
1371
1372
1373 iowrite8(0, ioaddr + CamCon);
1374}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this Rhine.
 */
1383static void rhine_init_cam_filter(struct net_device *dev)
1384{
1385 struct rhine_private *rp = netdev_priv(dev);
1386 void __iomem *ioaddr = rp->base;
1387
1388
1389 rhine_set_vlan_cam_mask(ioaddr, 0);
1390 rhine_set_cam_mask(ioaddr, 0);
1391
1392
1393 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1394 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1395}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */
1403static void rhine_update_vcam(struct net_device *dev)
1404{
1405 struct rhine_private *rp = netdev_priv(dev);
1406 void __iomem *ioaddr = rp->base;
1407 u16 vid;
1408 u32 vCAMmask = 0;
1409 unsigned int i = 0;
1410
1411 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1412 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1413 vCAMmask |= 1 << i;
1414 if (++i >= VCAM_SIZE)
1415 break;
1416 }
1417 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1418}
1419
1420static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1421{
1422 struct rhine_private *rp = netdev_priv(dev);
1423
1424 spin_lock_bh(&rp->lock);
1425 set_bit(vid, rp->active_vlans);
1426 rhine_update_vcam(dev);
1427 spin_unlock_bh(&rp->lock);
1428 return 0;
1429}
1430
1431static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1432{
1433 struct rhine_private *rp = netdev_priv(dev);
1434
1435 spin_lock_bh(&rp->lock);
1436 clear_bit(vid, rp->active_vlans);
1437 rhine_update_vcam(dev);
1438 spin_unlock_bh(&rp->lock);
1439 return 0;
1440}
1441
1442static void init_registers(struct net_device *dev)
1443{
1444 struct rhine_private *rp = netdev_priv(dev);
1445 void __iomem *ioaddr = rp->base;
1446 int i;
1447
1448 for (i = 0; i < 6; i++)
1449 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1450
1451
1452 iowrite16(0x0006, ioaddr + PCIBusConfig);
1453
1454 iowrite8(0x20, ioaddr + TxConfig);
1455 rp->tx_thresh = 0x20;
1456 rp->rx_thresh = 0x60;
1457
1458 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1459 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1460
1461 rhine_set_rx_mode(dev);
1462
1463 if (rp->pdev->revision >= VT6105M)
1464 rhine_init_cam_filter(dev);
1465
1466 napi_enable(&rp->napi);
1467
1468 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1469
1470 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1471 ioaddr + ChipCmd);
1472 rhine_check_media(dev, 1);
1473}
1474
1475
1476static void rhine_enable_linkmon(struct rhine_private *rp)
1477{
1478 void __iomem *ioaddr = rp->base;
1479
1480 iowrite8(0, ioaddr + MIICmd);
1481 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1482 iowrite8(0x80, ioaddr + MIICmd);
1483
1484 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1485
1486 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1487}
1488
1489
1490static void rhine_disable_linkmon(struct rhine_private *rp)
1491{
1492 void __iomem *ioaddr = rp->base;
1493
1494 iowrite8(0, ioaddr + MIICmd);
1495
1496 if (rp->quirks & rqRhineI) {
1497 iowrite8(0x01, ioaddr + MIIRegAddr);
1498
1499
1500 mdelay(1);
1501
1502
1503 iowrite8(0x80, ioaddr + MIICmd);
1504
1505 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1506
1507
1508 iowrite8(0, ioaddr + MIICmd);
1509 }
1510 else
1511 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1512}
1513
1514
1515
1516static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1517{
1518 struct rhine_private *rp = netdev_priv(dev);
1519 void __iomem *ioaddr = rp->base;
1520 int result;
1521
1522 rhine_disable_linkmon(rp);
1523
1524
1525 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1526 iowrite8(regnum, ioaddr + MIIRegAddr);
1527 iowrite8(0x40, ioaddr + MIICmd);
1528 rhine_wait_bit_low(rp, MIICmd, 0x40);
1529 result = ioread16(ioaddr + MIIData);
1530
1531 rhine_enable_linkmon(rp);
1532 return result;
1533}
1534
1535static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1536{
1537 struct rhine_private *rp = netdev_priv(dev);
1538 void __iomem *ioaddr = rp->base;
1539
1540 rhine_disable_linkmon(rp);
1541
1542
1543 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1544 iowrite8(regnum, ioaddr + MIIRegAddr);
1545 iowrite16(value, ioaddr + MIIData);
1546 iowrite8(0x20, ioaddr + MIICmd);
1547 rhine_wait_bit_low(rp, MIICmd, 0x20);
1548
1549 rhine_enable_linkmon(rp);
1550}
1551
1552static void rhine_task_disable(struct rhine_private *rp)
1553{
1554 mutex_lock(&rp->task_lock);
1555 rp->task_enable = false;
1556 mutex_unlock(&rp->task_lock);
1557
1558 cancel_work_sync(&rp->slow_event_task);
1559 cancel_work_sync(&rp->reset_task);
1560}
1561
1562static void rhine_task_enable(struct rhine_private *rp)
1563{
1564 mutex_lock(&rp->task_lock);
1565 rp->task_enable = true;
1566 mutex_unlock(&rp->task_lock);
1567}
1568
1569static int rhine_open(struct net_device *dev)
1570{
1571 struct rhine_private *rp = netdev_priv(dev);
1572 void __iomem *ioaddr = rp->base;
1573 int rc;
1574
1575 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1576 dev);
1577 if (rc)
1578 return rc;
1579
1580 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1581
1582 rc = alloc_ring(dev);
1583 if (rc) {
1584 free_irq(rp->pdev->irq, dev);
1585 return rc;
1586 }
1587 alloc_rbufs(dev);
1588 alloc_tbufs(dev);
1589 rhine_chip_reset(dev);
1590 rhine_task_enable(rp);
1591 init_registers(dev);
1592
1593 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1594 __func__, ioread16(ioaddr + ChipCmd),
1595 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1596
1597 netif_start_queue(dev);
1598
1599 return 0;
1600}
1601
1602static void rhine_reset_task(struct work_struct *work)
1603{
1604 struct rhine_private *rp = container_of(work, struct rhine_private,
1605 reset_task);
1606 struct net_device *dev = rp->dev;
1607
1608 mutex_lock(&rp->task_lock);
1609
1610 if (!rp->task_enable)
1611 goto out_unlock;
1612
1613 napi_disable(&rp->napi);
1614 spin_lock_bh(&rp->lock);
1615
1616
1617 free_tbufs(dev);
1618 free_rbufs(dev);
1619 alloc_tbufs(dev);
1620 alloc_rbufs(dev);
1621
1622
1623 rhine_chip_reset(dev);
1624 init_registers(dev);
1625
1626 spin_unlock_bh(&rp->lock);
1627
1628 netif_trans_update(dev);
1629 dev->stats.tx_errors++;
1630 netif_wake_queue(dev);
1631
1632out_unlock:
1633 mutex_unlock(&rp->task_lock);
1634}
1635
1636static void rhine_tx_timeout(struct net_device *dev)
1637{
1638 struct rhine_private *rp = netdev_priv(dev);
1639 void __iomem *ioaddr = rp->base;
1640
1641 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1642 ioread16(ioaddr + IntrStatus),
1643 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1644
1645 schedule_work(&rp->reset_task);
1646}
1647
1648static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1649 struct net_device *dev)
1650{
1651 struct rhine_private *rp = netdev_priv(dev);
1652 void __iomem *ioaddr = rp->base;
1653 unsigned entry;

	/*
	 * Caution: the write order is important here, set the field
	 * with the "ownership" bits last.
	 */

	/* Calculate the next Tx descriptor entry. */
1659 entry = rp->cur_tx % TX_RING_SIZE;
1660
1661 if (skb_padto(skb, ETH_ZLEN))
1662 return NETDEV_TX_OK;
1663
1664 rp->tx_skbuff[entry] = skb;
1665
1666 if ((rp->quirks & rqRhineI) &&
1667 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1668
1669 if (skb->len > PKT_BUF_SZ) {
1670
1671 dev_kfree_skb(skb);
1672 rp->tx_skbuff[entry] = NULL;
1673 dev->stats.tx_dropped++;
1674 return NETDEV_TX_OK;
1675 }
1676
1677
1678 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1679 if (skb->len < ETH_ZLEN)
1680 memset(rp->tx_buf[entry] + skb->len, 0,
1681 ETH_ZLEN - skb->len);
1682 rp->tx_skbuff_dma[entry] = 0;
1683 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1684 (rp->tx_buf[entry] -
1685 rp->tx_bufs));
1686 } else {
1687 rp->tx_skbuff_dma[entry] =
1688 pci_map_single(rp->pdev, skb->data, skb->len,
1689 PCI_DMA_TODEVICE);
1690 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1691 }
1692
1693 rp->tx_ring[entry].desc_length =
1694 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1695
1696 if (unlikely(skb_vlan_tag_present(skb))) {
1697 u16 vid_pcp = skb_vlan_tag_get(skb);
1698
1699
1700 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1701 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1702 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1703
1704 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1705 }
1706 else
1707 rp->tx_ring[entry].tx_status = 0;
1708
1709
1710 wmb();
1711 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1712 wmb();
1713
1714 rp->cur_tx++;
1715
1716
1717
1718 if (skb_vlan_tag_present(skb))
1719
1720 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1721
1722
1723 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1724 ioaddr + ChipCmd1);
1725 IOSYNC;
1726
1727 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1728 netif_stop_queue(dev);
1729
1730 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1731 rp->cur_tx - 1, entry);
1732
1733 return NETDEV_TX_OK;
1734}
1735
1736static void rhine_irq_disable(struct rhine_private *rp)
1737{
1738 iowrite16(0x0000, rp->base + IntrEnable);
1739 mmiowb();
1740}

/*
 * The interrupt handler mainly disables further chip interrupts and
 * schedules NAPI; Rx/Tx processing happens in rhine_napipoll().
 */
1744static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1745{
1746 struct net_device *dev = dev_instance;
1747 struct rhine_private *rp = netdev_priv(dev);
1748 u32 status;
1749 int handled = 0;
1750
1751 status = rhine_get_events(rp);
1752
1753 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1754
1755 if (status & RHINE_EVENT) {
1756 handled = 1;
1757
1758 rhine_irq_disable(rp);
1759 napi_schedule(&rp->napi);
1760 }
1761
1762 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1763 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1764 status);
1765 }
1766
1767 return IRQ_RETVAL(handled);
1768}

/* Reclaim transmitted skbs and update Tx statistics (called from NAPI poll). */
1772static void rhine_tx(struct net_device *dev)
1773{
1774 struct rhine_private *rp = netdev_priv(dev);
1775 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1776
1777
1778 while (rp->dirty_tx != rp->cur_tx) {
1779 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1780 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1781 entry, txstatus);
1782 if (txstatus & DescOwn)
1783 break;
1784 if (txstatus & 0x8000) {
1785 netif_dbg(rp, tx_done, dev,
1786 "Transmit error, Tx status %08x\n", txstatus);
1787 dev->stats.tx_errors++;
1788 if (txstatus & 0x0400)
1789 dev->stats.tx_carrier_errors++;
1790 if (txstatus & 0x0200)
1791 dev->stats.tx_window_errors++;
1792 if (txstatus & 0x0100)
1793 dev->stats.tx_aborted_errors++;
1794 if (txstatus & 0x0080)
1795 dev->stats.tx_heartbeat_errors++;
1796 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1797 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1798 dev->stats.tx_fifo_errors++;
1799 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1800 break;
1801 }
1802
1803 } else {
1804 if (rp->quirks & rqRhineI)
1805 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1806 else
1807 dev->stats.collisions += txstatus & 0x0F;
1808 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1809 (txstatus >> 3) & 0xF, txstatus & 0xF);
1810
1811 u64_stats_update_begin(&rp->tx_stats.syncp);
1812 rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1813 rp->tx_stats.packets++;
1814 u64_stats_update_end(&rp->tx_stats.syncp);
1815 }
1816
1817 if (rp->tx_skbuff_dma[entry]) {
1818 pci_unmap_single(rp->pdev,
1819 rp->tx_skbuff_dma[entry],
1820 rp->tx_skbuff[entry]->len,
1821 PCI_DMA_TODEVICE);
1822 }
1823 dev_kfree_skb(rp->tx_skbuff[entry]);
1824 rp->tx_skbuff[entry] = NULL;
1825 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1826 }
1827 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1828 netif_wake_queue(dev);
1829}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer (total of frame data)
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the two-byte TCI is appended after the (4-byte aligned) frame data.
 * Extract it from there.
 */
1840static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1841{
1842 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1843 return be16_to_cpup((__be16 *)trailer);
1844}

/* Process up to limit frames from receive ring. */
1847static int rhine_rx(struct net_device *dev, int limit)
1848{
1849 struct rhine_private *rp = netdev_priv(dev);
1850 int count;
1851 int entry = rp->cur_rx % RX_RING_SIZE;
1852
1853 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1854 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1855
1856
1857 for (count = 0; count < limit; ++count) {
1858 struct rx_desc *desc = rp->rx_head_desc;
1859 u32 desc_status = le32_to_cpu(desc->rx_status);
1860 u32 desc_length = le32_to_cpu(desc->desc_length);
1861 int data_size = desc_status >> 16;
1862
1863 if (desc_status & DescOwn)
1864 break;
1865
1866 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1867 desc_status);
1868
1869 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1870 if ((desc_status & RxWholePkt) != RxWholePkt) {
1871 netdev_warn(dev,
1872 "Oversized Ethernet frame spanned multiple buffers, "
1873 "entry %#x length %d status %08x!\n",
1874 entry, data_size,
1875 desc_status);
1876 netdev_warn(dev,
1877 "Oversized Ethernet frame %p vs %p\n",
1878 rp->rx_head_desc,
1879 &rp->rx_ring[entry]);
1880 dev->stats.rx_length_errors++;
1881 } else if (desc_status & RxErr) {
1882
1883 netif_dbg(rp, rx_err, dev,
1884 "%s() Rx error %08x\n", __func__,
1885 desc_status);
1886 dev->stats.rx_errors++;
1887 if (desc_status & 0x0030)
1888 dev->stats.rx_length_errors++;
1889 if (desc_status & 0x0048)
1890 dev->stats.rx_fifo_errors++;
1891 if (desc_status & 0x0004)
1892 dev->stats.rx_frame_errors++;
1893 if (desc_status & 0x0002) {
1894
1895 spin_lock(&rp->lock);
1896 dev->stats.rx_crc_errors++;
1897 spin_unlock(&rp->lock);
1898 }
1899 }
1900 } else {
1901 struct sk_buff *skb = NULL;
1902
1903 int pkt_len = data_size - 4;
1904 u16 vlan_tci = 0;
1905
1906
1907
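			/*
			 * Check if the packet is long enough to accept without
			 * copying to a minimally-sized skbuff.
			 */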
1908 if (pkt_len < rx_copybreak)
1909 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1910 if (skb) {
1911 pci_dma_sync_single_for_cpu(rp->pdev,
1912 rp->rx_skbuff_dma[entry],
1913 rp->rx_buf_sz,
1914 PCI_DMA_FROMDEVICE);
1915
1916 skb_copy_to_linear_data(skb,
1917 rp->rx_skbuff[entry]->data,
1918 pkt_len);
1919 skb_put(skb, pkt_len);
1920 pci_dma_sync_single_for_device(rp->pdev,
1921 rp->rx_skbuff_dma[entry],
1922 rp->rx_buf_sz,
1923 PCI_DMA_FROMDEVICE);
1924 } else {
1925 skb = rp->rx_skbuff[entry];
1926 if (skb == NULL) {
1927 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1928 break;
1929 }
1930 rp->rx_skbuff[entry] = NULL;
1931 skb_put(skb, pkt_len);
1932 pci_unmap_single(rp->pdev,
1933 rp->rx_skbuff_dma[entry],
1934 rp->rx_buf_sz,
1935 PCI_DMA_FROMDEVICE);
1936 }
1937
1938 if (unlikely(desc_length & DescTag))
1939 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1940
1941 skb->protocol = eth_type_trans(skb, dev);
1942
1943 if (unlikely(desc_length & DescTag))
1944 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1945 netif_receive_skb(skb);
1946
1947 u64_stats_update_begin(&rp->rx_stats.syncp);
1948 rp->rx_stats.bytes += pkt_len;
1949 rp->rx_stats.packets++;
1950 u64_stats_update_end(&rp->rx_stats.syncp);
1951 }
1952 entry = (++rp->cur_rx) % RX_RING_SIZE;
1953 rp->rx_head_desc = &rp->rx_ring[entry];
1954 }
1955
1956
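	/* Refill the Rx ring buffers. */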
1957 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1958 struct sk_buff *skb;
1959 entry = rp->dirty_rx % RX_RING_SIZE;
1960 if (rp->rx_skbuff[entry] == NULL) {
1961 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1962 rp->rx_skbuff[entry] = skb;
1963 if (skb == NULL)
1964 break;
1965 rp->rx_skbuff_dma[entry] =
1966 pci_map_single(rp->pdev, skb->data,
1967 rp->rx_buf_sz,
1968 PCI_DMA_FROMDEVICE);
1969 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1970 }
1971 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1972 }
1973
1974 return count;
1975}
1976
1977static void rhine_restart_tx(struct net_device *dev) {
1978 struct rhine_private *rp = netdev_priv(dev);
1979 void __iomem *ioaddr = rp->base;
1980 int entry = rp->dirty_tx % TX_RING_SIZE;
1981 u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
1987 intr_status = rhine_get_events(rp);
1988
1989 if ((intr_status & IntrTxErrSummary) == 0) {
1990
1991
1992 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1993 ioaddr + TxRingPtr);
1994
1995 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1996 ioaddr + ChipCmd);
1997
1998 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1999
2000 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2001
2002 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2003 ioaddr + ChipCmd1);
2004 IOSYNC;
2005 }
2006 else {
2007
2008 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2009 intr_status);
2010 }
2011
2012}
2013
2014static void rhine_slow_event_task(struct work_struct *work)
2015{
2016 struct rhine_private *rp =
2017 container_of(work, struct rhine_private, slow_event_task);
2018 struct net_device *dev = rp->dev;
2019 u32 intr_status;
2020
2021 mutex_lock(&rp->task_lock);
2022
2023 if (!rp->task_enable)
2024 goto out_unlock;
2025
2026 intr_status = rhine_get_events(rp);
2027 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2028
2029 if (intr_status & IntrLinkChange)
2030 rhine_check_media(dev, 0);
2031
2032 if (intr_status & IntrPCIErr)
2033 netif_warn(rp, hw, dev, "PCI error\n");
2034
2035 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2036
2037out_unlock:
2038 mutex_unlock(&rp->task_lock);
2039}
2040
2041static struct rtnl_link_stats64 *
2042rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2043{
2044 struct rhine_private *rp = netdev_priv(dev);
2045 unsigned int start;
2046
2047 spin_lock_bh(&rp->lock);
2048 rhine_update_rx_crc_and_missed_errord(rp);
2049 spin_unlock_bh(&rp->lock);
2050
2051 netdev_stats_to_stats64(stats, &dev->stats);
2052
2053 do {
2054 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2055 stats->rx_packets = rp->rx_stats.packets;
2056 stats->rx_bytes = rp->rx_stats.bytes;
2057 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2058
2059 do {
2060 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2061 stats->tx_packets = rp->tx_stats.packets;
2062 stats->tx_bytes = rp->tx_stats.bytes;
2063 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2064
2065 return stats;
2066}
2067
2068static void rhine_set_rx_mode(struct net_device *dev)
2069{
2070 struct rhine_private *rp = netdev_priv(dev);
2071 void __iomem *ioaddr = rp->base;
2072 u32 mc_filter[2];
2073 u8 rx_mode = 0x0C;
2074 struct netdev_hw_addr *ha;
2075
2076 if (dev->flags & IFF_PROMISC) {
2077 rx_mode = 0x1C;
2078 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2079 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2080 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2081 (dev->flags & IFF_ALLMULTI)) {
2082
2083 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2084 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2085 } else if (rp->pdev->revision >= VT6105M) {
2086 int i = 0;
2087 u32 mCAMmask = 0;
2088 netdev_for_each_mc_addr(ha, dev) {
2089 if (i == MCAM_SIZE)
2090 break;
2091 rhine_set_cam(ioaddr, i, ha->addr);
2092 mCAMmask |= 1 << i;
2093 i++;
2094 }
2095 rhine_set_cam_mask(ioaddr, mCAMmask);
2096 } else {
2097 memset(mc_filter, 0, sizeof(mc_filter));
2098 netdev_for_each_mc_addr(ha, dev) {
2099 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2100
2101 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2102 }
2103 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2104 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2105 }
2106
2107 if (rp->pdev->revision >= VT6105M) {
2108 if (dev->flags & IFF_PROMISC)
2109 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2110 else
2111 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2112 }
2113 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2114}
2115
2116static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2117{
2118 struct rhine_private *rp = netdev_priv(dev);
2119
2120 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2121 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2122 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2123}
2124
2125static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2126{
2127 struct rhine_private *rp = netdev_priv(dev);
2128 int rc;
2129
2130 mutex_lock(&rp->task_lock);
2131 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2132 mutex_unlock(&rp->task_lock);
2133
2134 return rc;
2135}
2136
2137static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2138{
2139 struct rhine_private *rp = netdev_priv(dev);
2140 int rc;
2141
2142 mutex_lock(&rp->task_lock);
2143 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2144 rhine_set_carrier(&rp->mii_if);
2145 mutex_unlock(&rp->task_lock);
2146
2147 return rc;
2148}
2149
2150static int netdev_nway_reset(struct net_device *dev)
2151{
2152 struct rhine_private *rp = netdev_priv(dev);
2153
2154 return mii_nway_restart(&rp->mii_if);
2155}
2156
2157static u32 netdev_get_link(struct net_device *dev)
2158{
2159 struct rhine_private *rp = netdev_priv(dev);
2160
2161 return mii_link_ok(&rp->mii_if);
2162}
2163
2164static u32 netdev_get_msglevel(struct net_device *dev)
2165{
2166 struct rhine_private *rp = netdev_priv(dev);
2167
2168 return rp->msg_enable;
2169}
2170
2171static void netdev_set_msglevel(struct net_device *dev, u32 value)
2172{
2173 struct rhine_private *rp = netdev_priv(dev);
2174
2175 rp->msg_enable = value;
2176}
2177
2178static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2179{
2180 struct rhine_private *rp = netdev_priv(dev);
2181
2182 if (!(rp->quirks & rqWOL))
2183 return;
2184
2185 spin_lock_irq(&rp->lock);
2186 wol->supported = WAKE_PHY | WAKE_MAGIC |
2187 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2188 wol->wolopts = rp->wolopts;
2189 spin_unlock_irq(&rp->lock);
2190}
2191
2192static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2193{
2194 struct rhine_private *rp = netdev_priv(dev);
2195 u32 support = WAKE_PHY | WAKE_MAGIC |
2196 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2197
2198 if (!(rp->quirks & rqWOL))
2199 return -EINVAL;
2200
2201 if (wol->wolopts & ~support)
2202 return -EINVAL;
2203
2204 spin_lock_irq(&rp->lock);
2205 rp->wolopts = wol->wolopts;
2206 spin_unlock_irq(&rp->lock);
2207
2208 return 0;
2209}
2210
2211static const struct ethtool_ops netdev_ethtool_ops = {
2212 .get_drvinfo = netdev_get_drvinfo,
2213 .get_settings = netdev_get_settings,
2214 .set_settings = netdev_set_settings,
2215 .nway_reset = netdev_nway_reset,
2216 .get_link = netdev_get_link,
2217 .get_msglevel = netdev_get_msglevel,
2218 .set_msglevel = netdev_set_msglevel,
2219 .get_wol = rhine_get_wol,
2220 .set_wol = rhine_set_wol,
2221};
2222
2223static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2224{
2225 struct rhine_private *rp = netdev_priv(dev);
2226 int rc;
2227
2228 if (!netif_running(dev))
2229 return -EINVAL;
2230
2231 mutex_lock(&rp->task_lock);
2232 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2233 rhine_set_carrier(&rp->mii_if);
2234 mutex_unlock(&rp->task_lock);
2235
2236 return rc;
2237}
2238
2239static int rhine_close(struct net_device *dev)
2240{
2241 struct rhine_private *rp = netdev_priv(dev);
2242 void __iomem *ioaddr = rp->base;
2243
2244 rhine_task_disable(rp);
2245 napi_disable(&rp->napi);
2246 netif_stop_queue(dev);
2247
2248 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2249 ioread16(ioaddr + ChipCmd));
2250
2251
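	/* Switch to loopback mode to avoid hardware races. */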
2252 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2253
2254 rhine_irq_disable(rp);
2255
2256
2257 iowrite16(CmdStop, ioaddr + ChipCmd);
2258
2259 free_irq(rp->pdev->irq, dev);
2260 free_rbufs(dev);
2261 free_tbufs(dev);
2262 free_ring(dev);
2263
2264 return 0;
2265}
2266
2267
2268static void rhine_remove_one(struct pci_dev *pdev)
2269{
2270 struct net_device *dev = pci_get_drvdata(pdev);
2271 struct rhine_private *rp = netdev_priv(dev);
2272
2273 unregister_netdev(dev);
2274
2275 pci_iounmap(pdev, rp->base);
2276 pci_release_regions(pdev);
2277
2278 free_netdev(dev);
2279 pci_disable_device(pdev);
2280 pci_set_drvdata(pdev, NULL);
2281}
2282
2283static void rhine_shutdown (struct pci_dev *pdev)
2284{
2285 struct net_device *dev = pci_get_drvdata(pdev);
2286 struct rhine_private *rp = netdev_priv(dev);
2287 void __iomem *ioaddr = rp->base;
2288
2289 if (!(rp->quirks & rqWOL))
2290 return;
2291
2292 rhine_power_init(dev);
2293
2294
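	/* Make sure we only use pattern 0 and 1 and not 4 and 5. */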
2295 if (rp->quirks & rq6patterns)
2296 iowrite8(0x04, ioaddr + WOLcgClr);
2297
2298 spin_lock(&rp->lock);
2299
2300 if (rp->wolopts & WAKE_MAGIC) {
2301 iowrite8(WOLmagic, ioaddr + WOLcrSet);

		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
2306 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2307 }
2308
2309 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2310 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2311
2312 if (rp->wolopts & WAKE_PHY)
2313 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2314
2315 if (rp->wolopts & WAKE_UCAST)
2316 iowrite8(WOLucast, ioaddr + WOLcrSet);
2317
2318 if (rp->wolopts) {
2319
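		/* Enable legacy WOL (for old motherboards). */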
2320 iowrite8(0x01, ioaddr + PwcfgSet);
2321 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2322 }
2323
2324 spin_unlock(&rp->lock);
2325
2326 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2327 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2328
2329 pci_wake_from_d3(pdev, true);
2330 pci_set_power_state(pdev, PCI_D3hot);
2331 }
2332}
2333
2334#ifdef CONFIG_PM_SLEEP
2335static int rhine_suspend(struct device *device)
2336{
2337 struct pci_dev *pdev = to_pci_dev(device);
2338 struct net_device *dev = pci_get_drvdata(pdev);
2339 struct rhine_private *rp = netdev_priv(dev);
2340
2341 if (!netif_running(dev))
2342 return 0;
2343
2344 rhine_task_disable(rp);
2345 rhine_irq_disable(rp);
2346 napi_disable(&rp->napi);
2347
2348 netif_device_detach(dev);
2349
2350 rhine_shutdown(pdev);
2351
2352 return 0;
2353}
2354
2355static int rhine_resume(struct device *device)
2356{
2357 struct pci_dev *pdev = to_pci_dev(device);
2358 struct net_device *dev = pci_get_drvdata(pdev);
2359 struct rhine_private *rp = netdev_priv(dev);
2360
2361 if (!netif_running(dev))
2362 return 0;
2363
2364#ifdef USE_MMIO
2365 enable_mmio(rp->pioaddr, rp->quirks);
2366#endif
2367 rhine_power_init(dev);
2368 free_tbufs(dev);
2369 free_rbufs(dev);
2370 alloc_tbufs(dev);
2371 alloc_rbufs(dev);
2372 rhine_task_enable(rp);
2373 spin_lock_bh(&rp->lock);
2374 init_registers(dev);
2375 spin_unlock_bh(&rp->lock);
2376
2377 netif_device_attach(dev);
2378
2379 return 0;
2380}
2381
2382static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2383#define RHINE_PM_OPS (&rhine_pm_ops)
2384
2385#else
2386
2387#define RHINE_PM_OPS NULL
2388
2389#endif
2390
2391static struct pci_driver rhine_driver = {
2392 .name = DRV_NAME,
2393 .id_table = rhine_pci_tbl,
2394 .probe = rhine_init_one,
2395 .remove = rhine_remove_one,
2396 .shutdown = rhine_shutdown,
2397 .driver.pm = RHINE_PM_OPS,
2398};
2399
2400static struct dmi_system_id __initdata rhine_dmi_table[] = {
2401 {
2402 .ident = "EPIA-M",
2403 .matches = {
2404 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2405 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2406 },
2407 },
2408 {
2409 .ident = "KV7",
2410 .matches = {
2411 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2412 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2413 },
2414 },
2415 { NULL }
2416};
2417
2418static int __init rhine_init(void)
2419{
2420
2421#ifdef MODULE
2422 pr_info("%s\n", version);
2423#endif
2424 if (dmi_check_system(rhine_dmi_table)) {
2425
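		/* These BIOSes fail at PXE boot if the chip is left in D3. */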
2426 avoid_D3 = true;
2427 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2428 }
2429 else if (avoid_D3)
2430 pr_info("avoid_D3 set\n");
2431
2432 return pci_register_driver(&rhine_driver);
2433}
2434
2435
2436static void __exit rhine_cleanup(void)
2437{
2438 pci_unregister_driver(&rhine_driver);
2439}
2440
2441
2442module_init(rhine_init);
2443module_exit(rhine_cleanup);
2444