1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.3"
34#define DRV_RELDATE "2007-03-06"
35
36
37
38
39
40static int debug = 1;
41static int max_interrupt_work = 20;
42
43
44
45#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
46 || defined(CONFIG_SPARC) || defined(__ia64__) \
47 || defined(__sh__) || defined(__mips__)
48static int rx_copybreak = 1518;
49#else
50static int rx_copybreak;
51#endif
52
53
54
55static int avoid_D3;
56
57
58
59
60
61
62
63
64static const int multicast_filter_limit = 32;
65
66
67
68
69
70
71
72
73
74#define TX_RING_SIZE 16
75#define TX_QUEUE_LEN 10
76#ifdef CONFIG_VIA_RHINE_NAPI
77#define RX_RING_SIZE 64
78#else
79#define RX_RING_SIZE 16
80#endif
81
82
83
84
85
86#define TX_TIMEOUT (2*HZ)
87
88#define PKT_BUF_SZ 1536
89
90#include <linux/module.h>
91#include <linux/moduleparam.h>
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/timer.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
97#include <linux/slab.h>
98#include <linux/interrupt.h>
99#include <linux/pci.h>
100#include <linux/dma-mapping.h>
101#include <linux/netdevice.h>
102#include <linux/etherdevice.h>
103#include <linux/skbuff.h>
104#include <linux/init.h>
105#include <linux/delay.h>
106#include <linux/mii.h>
107#include <linux/ethtool.h>
108#include <linux/crc32.h>
109#include <linux/bitops.h>
110#include <asm/processor.h>
111#include <asm/io.h>
112#include <asm/irq.h>
113#include <asm/uaccess.h>
114#include <linux/dmi.h>
115
116
117static char version[] __devinitdata =
118KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
119
120
121
122#ifdef CONFIG_VIA_RHINE_MMIO
123#define USE_MMIO
124#else
125#endif
126
127MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
128MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
129MODULE_LICENSE("GPL");
130
131module_param(max_interrupt_work, int, 0);
132module_param(debug, int, 0);
133module_param(rx_copybreak, int, 0);
134module_param(avoid_D3, bool, 0);
135MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
136MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
137MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
138MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239enum rhine_revs {
240 VT86C100A = 0x00,
241 VTunknown0 = 0x20,
242 VT6102 = 0x40,
243 VT8231 = 0x50,
244 VT8233 = 0x60,
245 VT8235 = 0x74,
246 VT8237 = 0x78,
247 VTunknown1 = 0x7C,
248 VT6105 = 0x80,
249 VT6105_B0 = 0x83,
250 VT6105L = 0x8A,
251 VT6107 = 0x8C,
252 VTunknown2 = 0x8E,
253 VT6105M = 0x90,
254};
255
256enum rhine_quirks {
257 rqWOL = 0x0001,
258 rqForceReset = 0x0002,
259 rq6patterns = 0x0040,
260 rqStatusWBRace = 0x0080,
261 rqRhineI = 0x0100,
262};
263
264
265
266
267
268
269
270#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
271
/*
 * PCI IDs this driver binds to.  Vendor 0x1106 is VIA Technologies;
 * the four device IDs presumably map to VT86C100A (0x3043), VT6102
 * (0x3065), VT6105 (0x3106) and VT6105M (0x3053) — the probe routine
 * distinguishes the actual chip by PCI revision, not device ID.
 */
static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
280
281
282
283enum register_offsets {
284 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
285 ChipCmd1=0x09,
286 IntrStatus=0x0C, IntrEnable=0x0E,
287 MulticastFilter0=0x10, MulticastFilter1=0x14,
288 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
289 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
290 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
291 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
292 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
293 StickyHW=0x83, IntrStatus2=0x84,
294 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
295 WOLcrClr1=0xA6, WOLcgClr=0xA7,
296 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
297};
298
299
300enum backoff_bits {
301 BackOptional=0x01, BackModify=0x02,
302 BackCaptureEffect=0x04, BackRandom=0x08
303};
304
305#ifdef USE_MMIO
306
307static const int mmio_verify_registers[] = {
308 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
309 0
310};
311#endif
312
313
314enum intr_status_bits {
315 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
316 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
317 IntrPCIErr=0x0040,
318 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
319 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
320 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
321 IntrRxWakeUp=0x8000,
322 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
323 IntrTxDescRace=0x080000,
324 IntrTxErrSummary=0x082218,
325};
326
327
328enum wol_bits {
329 WOLucast = 0x10,
330 WOLmagic = 0x20,
331 WOLbmcast = 0x30,
332 WOLlnkon = 0x40,
333 WOLlnkoff = 0x80,
334};
335
336
337struct rx_desc {
338 __le32 rx_status;
339 __le32 desc_length;
340 __le32 addr;
341 __le32 next_desc;
342};
343struct tx_desc {
344 __le32 tx_status;
345 __le32 desc_length;
346 __le32 addr;
347 __le32 next_desc;
348};
349
350
351#define TXDESC 0x00e08000
352
353enum rx_status_bits {
354 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
355};
356
357
358enum desc_status_bits {
359 DescOwn=0x80000000
360};
361
362
363enum chip_cmd_bits {
364 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
365 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
366 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
367 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
368};
369
/* Per-adapter state, stored as netdev private data. */
struct rhine_private {
	/* Descriptor rings: one DMA-coherent allocation holding the Rx ring
	 * followed by the Tx ring (see alloc_ring()). */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs and their DMA mappings. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved addresses of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Aligned bounce buffers used only on Rhine-I, which cannot DMA from
	 * unaligned or fragmented skbs (see rhine_start_tx()). */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;			/* PIO base, kept for EEPROM reload */
	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	spinlock_t lock;		/* guards ring state and chip access */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;			/* rhine_quirks bits for this chip rev */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;			/* Wake-on-LAN options from ethtool */

	u8 tx_thresh, rx_thresh;	/* Current FIFO thresholds */

	struct mii_if_info mii_if;
	void __iomem *base;		/* mapped register base (PIO or MMIO) */
};
410
411static int mdio_read(struct net_device *dev, int phy_id, int location);
412static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
413static int rhine_open(struct net_device *dev);
414static void rhine_tx_timeout(struct net_device *dev);
415static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
416static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
417static void rhine_tx(struct net_device *dev);
418static int rhine_rx(struct net_device *dev, int limit);
419static void rhine_error(struct net_device *dev, int intr_status);
420static void rhine_set_rx_mode(struct net_device *dev);
421static struct net_device_stats *rhine_get_stats(struct net_device *dev);
422static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
423static const struct ethtool_ops netdev_ethtool_ops;
424static int rhine_close(struct net_device *dev);
425static void rhine_shutdown (struct pci_dev *pdev);
426
427#define RHINE_WAIT_FOR(condition) do { \
428 int i=1024; \
429 while (!(condition) && --i) \
430 ; \
431 if (debug > 1 && i < 512) \
432 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
433 DRV_NAME, 1024-i, __func__, __LINE__); \
434} while(0)
435
436static inline u32 get_intr_status(struct net_device *dev)
437{
438 struct rhine_private *rp = netdev_priv(dev);
439 void __iomem *ioaddr = rp->base;
440 u32 intr_status;
441
442 intr_status = ioread16(ioaddr + IntrStatus);
443
444 if (rp->quirks & rqStatusWBRace)
445 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
446 return intr_status;
447}
448
449
450
451
452
/*
 * Clear residual power-management / Wake-on-LAN state after (re)start
 * and, if the chip reports that it woke the system, log the reason.
 * Only chips with the rqWOL quirk have these registers.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits before clearing them */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}
508
/*
 * Soft-reset the chip via the Cmd1Reset bit.  If the reset bit does not
 * self-clear immediately, chips with rqForceReset get a forced reset via
 * MiscCmd, then we busy-wait for completion.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;	/* flush posted write before reading back */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
			(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			"failed" : "succeeded");
}
534
535#ifdef USE_MMIO
/*
 * Enable memory-mapped I/O access to the registers.  Must be done via
 * port I/O since MMIO is not yet usable.  The enable bit differs by
 * chip family: ConfigA bit 0x20 on Rhine-I, ConfigD bit 0x80 otherwise.
 */
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
548#endif
549
550
551
552
553
/*
 * Ask the chip to auto-load its station address and other registers from
 * the EEPROM, then wait for the load to finish.  Uses port I/O because
 * the EEPROM reload disables MMIO mode (re-enabled below).
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO.  If reload_eeprom() was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
576
577#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netconsole and things like that to
 * drive the chip when normal interrupts are unavailable.  Simply runs
 * the interrupt handler with the device's IRQ disabled.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
584#endif
585
586#ifdef CONFIG_VIA_RHINE_NAPI
/*
 * NAPI poll callback: process up to @budget received packets.  When the
 * Rx work is exhausted, leave polling mode and re-enable the full
 * interrupt mask (rhine_interrupt() masked the Rx sources before
 * scheduling us).  Returns the number of packets processed.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

		/* Re-enable all interrupt sources, including Rx */
		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
607#endif
608
/*
 * Bring the hardware to a known state: soft reset, then reload the
 * station address and configuration from the EEPROM.
 */
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
623
/*
 * PCI probe routine.  Identifies the chip variant from the PCI revision,
 * maps the register space (MMIO or PIO), initializes the hardware,
 * reads the MAC address, wires up the net_device ops and registers the
 * interface.  Returns 0 on success or a negative errno.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;	/* BAR 1 is the memory-mapped register window */
#else
	int bar = 0;	/* BAR 0 is the I/O-port register window */
#endif
	DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* Classify the chip by PCI revision (see enum rhine_revs). */
	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;	/* Rhine-I decodes a smaller region */
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, address 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check: both BARs must cover the register block */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* Station address was auto-loaded from EEPROM by rhine_hw_init() */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II the PHY address is read from the chip */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);	/* MIIPhyAddr */

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev() call */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr,
#else
	       (long)ioaddr,
#endif
	       print_mac(mac, dev->dev_addr), pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		/* Probe the PHY: un-isolate it and report its status. */
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set initial carrier state from BMSR link bit */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
846
/*
 * Allocate the DMA-coherent descriptor memory: one block holding the Rx
 * ring followed by the Tx ring.  On Rhine-I an extra block of aligned
 * Tx bounce buffers is also allocated (the chip cannot transmit from
 * arbitrary skb memory — see rhine_start_tx()).
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			/* undo the ring allocation above */
			pci_free_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    ring, ring_dma);
			return -ENOMEM;
		}
	}

	/* Tx ring starts right after the Rx ring in the same block. */
	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
881
882static void free_ring(struct net_device* dev)
883{
884 struct rhine_private *rp = netdev_priv(dev);
885
886 pci_free_consistent(rp->pdev,
887 RX_RING_SIZE * sizeof(struct rx_desc) +
888 TX_RING_SIZE * sizeof(struct tx_desc),
889 rp->rx_ring, rp->rx_ring_dma);
890 rp->tx_ring = NULL;
891
892 if (rp->tx_bufs)
893 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
894 rp->tx_bufs, rp->tx_bufs_dma);
895
896 rp->tx_bufs = NULL;
897
898}
899
/*
 * Initialize the Rx descriptor ring and populate it with freshly
 * allocated, DMA-mapped skbs.  Each descriptor links to the next, with
 * the last wrapping back to the first.  Descriptors whose skb was
 * allocated are handed to the chip via DescOwn.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	/* Buffer size: PKT_BUF_SZ for standard MTU, else MTU plus slack. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries (not yet owned by the chip). */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;	/* better luck at refill time */
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* Negative when some allocations failed; refill happens in rhine_rx(). */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
940
941static void free_rbufs(struct net_device* dev)
942{
943 struct rhine_private *rp = netdev_priv(dev);
944 int i;
945
946
947 for (i = 0; i < RX_RING_SIZE; i++) {
948 rp->rx_ring[i].rx_status = 0;
949 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
950 if (rp->rx_skbuff[i]) {
951 pci_unmap_single(rp->pdev,
952 rp->rx_skbuff_dma[i],
953 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
954 dev_kfree_skb(rp->rx_skbuff[i]);
955 }
956 rp->rx_skbuff[i] = NULL;
957 }
958}
959
/*
 * Initialize the Tx descriptor ring: empty status, linked in a circle,
 * no skbs attached.  On Rhine-I each slot also gets its fixed bounce
 * buffer carved out of the tx_bufs block.
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Last entry wraps back to the start of the ring. */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}
980
/*
 * Tear down the Tx ring: poison each descriptor, unmap and free any
 * in-flight skb.  Slots that used a Rhine-I bounce buffer have a zero
 * tx_skbuff_dma and need no unmap.
 */
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		/* poison value makes stale DMA use easy to spot */
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
1003
/*
 * Query the PHY via mii_check_media() and program the MAC's duplex bit
 * (Cmd1FDuplex) to match the negotiated mode.  @init_media is passed
 * straight through to mii_check_media() for first-time setup.
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	else
	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
			rp->mii_if.force_media, netif_carrier_ok(dev));
}
1021
1022
1023static void rhine_set_carrier(struct mii_if_info *mii)
1024{
1025 if (mii->force_media) {
1026
1027 if (!netif_carrier_ok(mii->dev))
1028 netif_carrier_on(mii->dev);
1029 }
1030 else
1031 rhine_check_media(mii->dev, 0);
1032 if (debug > 1)
1033 printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1034 mii->dev->name, mii->force_media,
1035 netif_carrier_ok(mii->dev));
1036}
1037
/*
 * Program the chip after a reset: station address, bus config, FIFO
 * thresholds, ring base addresses, Rx filter, interrupt mask — then
 * start the Rx and Tx engines.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_enable(&rp->napi);
#endif

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1074
1075
/* Enable MII link status auto-polling (required for vlan etc.) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);	/* start one-shot read */

	/* wait for the chip to latch a valid BMSR value */
	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	/* 0x40 turns on continuous monitoring of the BMSR register */
	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1086
1087
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
1109
1110
1111
/*
 * Read an MII PHY register.  Link monitoring must be paused around the
 * access; it is restored before returning.  Returns the 16-bit value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
1130
/*
 * Write an MII PHY register.  As with mdio_read(), link monitoring is
 * paused for the duration of the access.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
1147
/*
 * net_device open callback: grab the (shared) IRQ, allocate and fill
 * the descriptor rings, reset and program the chip, then start the Tx
 * queue.  Returns 0 or a negative errno with everything unwound.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
1182
/*
 * Watchdog callback, fired when a transmit hasn't completed within
 * TX_TIMEOUT.  Performs a full recovery: tear down both rings, reset
 * and reprogram the chip, then restart the queue.
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}
1219
/*
 * hard_start_xmit: queue one skb on the Tx ring.
 *
 * On Rhine-I, packets that are misaligned, fragmented, or need checksum
 * offload must first be copied into an aligned bounce buffer because
 * the chip cannot handle them in place.  Always returns 0
 * (NETDEV_TX_OK); undeliverable packets are dropped, not requeued.
 */
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caller ensures cur_tx - dirty_tx < TX_QUEUE_LEN via the
	 * netif_stop_queue() below, so the slot is always free. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* skb_padto() frees the skb itself on failure. */
	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;	/* flags "no unmap needed" */
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();	/* descriptor fields must be visible before ownership flips */
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
1295
1296
1297
1298static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1299{
1300 struct net_device *dev = dev_instance;
1301 struct rhine_private *rp = netdev_priv(dev);
1302 void __iomem *ioaddr = rp->base;
1303 u32 intr_status;
1304 int boguscnt = max_interrupt_work;
1305 int handled = 0;
1306
1307 while ((intr_status = get_intr_status(dev))) {
1308 handled = 1;
1309
1310
1311 if (intr_status & IntrTxDescRace)
1312 iowrite8(0x08, ioaddr + IntrStatus2);
1313 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1314 IOSYNC;
1315
1316 if (debug > 4)
1317 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1318 dev->name, intr_status);
1319
1320 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1321 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1322#ifdef CONFIG_VIA_RHINE_NAPI
1323 iowrite16(IntrTxAborted |
1324 IntrTxDone | IntrTxError | IntrTxUnderrun |
1325 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1326 ioaddr + IntrEnable);
1327
1328 netif_rx_schedule(dev, &rp->napi);
1329#else
1330 rhine_rx(dev, RX_RING_SIZE);
1331#endif
1332 }
1333
1334 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1335 if (intr_status & IntrTxErrSummary) {
1336
1337 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1338 if (debug > 2 &&
1339 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1340 printk(KERN_WARNING "%s: "
1341 "rhine_interrupt() Tx engine"
1342 "still on.\n", dev->name);
1343 }
1344 rhine_tx(dev);
1345 }
1346
1347
1348 if (intr_status & (IntrPCIErr | IntrLinkChange |
1349 IntrStatsMax | IntrTxError | IntrTxAborted |
1350 IntrTxUnderrun | IntrTxDescRace))
1351 rhine_error(dev, intr_status);
1352
1353 if (--boguscnt < 0) {
1354 printk(KERN_WARNING "%s: Too much work at interrupt, "
1355 "status=%#8.8x.\n",
1356 dev->name, intr_status);
1357 break;
1358 }
1359 }
1360
1361 if (debug > 3)
1362 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1363 dev->name, ioread16(ioaddr + IntrStatus));
1364 return IRQ_RETVAL(handled);
1365}
1366
1367
1368
/*
 * Reclaim completed Tx descriptors: account statistics, unmap DMA,
 * free skbs, and wake the queue if enough slots opened up.  Called
 * from the interrupt handler with the device's lock taken here.
 */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;	/* chip still owns it — not done yet */
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				/* give the descriptor back for retransmit */
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* collision count field differs by chip family */
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. (zero dma means bounce buffer
		 * was used — nothing to unmap) */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
1429
1430
/*
 * Process up to @limit received frames and refill the Rx ring.
 * Good frames are handed to the stack (frames shorter than
 * rx_copybreak are copied into a freshly allocated skb so the big
 * ring buffer can be reused); error frames only update statistics.
 * Returns the number of descriptors examined.
 */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* Walk the ring until we hit a descriptor the chip still owns. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		/* Frame length lives in the upper 16 bits of the status. */
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;	/* no more completed frames */

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		/* Anything but a clean, whole packet counts as an error. */
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				/* Frame spanned several descriptors: treated as
				 * oversized since each buffer holds a whole
				 * maximum-size frame. */
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* Receive error: decode the individual bits. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* rp->lock also guards the CRC tally
					 * updates done in rhine_error(). */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;

			/* Length minus 4 — presumably strips the trailing
			 * FCS/CRC; NOTE(review): confirm against datasheet. */
			int pkt_len = data_size - 4;

			/* Small frame: copy into a minimally sized skb so the
			 * large ring buffer stays mapped and reusable. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16-byte align IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame: hand the ring buffer itself to
				 * the stack; it is replaced during refill. */
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill: allocate and map a new buffer for every consumed entry,
	 * then give the descriptors back to the chip. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* try again on the next pass */
			skb->dev = dev;
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
1557
1558
1559
1560
1561
1562
1563
/*
 * Reset the chip's hardware tally counters. Zero is written to
 * RxMissed, and the CRC-error/missed counters are read with the
 * values discarded. NOTE(review): clear-on-read behavior of these
 * registers is assumed from the discarded reads — confirm against
 * the VIA Rhine datasheet.
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
1570
1571static void rhine_restart_tx(struct net_device *dev) {
1572 struct rhine_private *rp = netdev_priv(dev);
1573 void __iomem *ioaddr = rp->base;
1574 int entry = rp->dirty_tx % TX_RING_SIZE;
1575 u32 intr_status;
1576
1577
1578
1579
1580
1581 intr_status = get_intr_status(dev);
1582
1583 if ((intr_status & IntrTxErrSummary) == 0) {
1584
1585
1586 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1587 ioaddr + TxRingPtr);
1588
1589 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1590 ioaddr + ChipCmd);
1591 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1592 ioaddr + ChipCmd1);
1593 IOSYNC;
1594 }
1595 else {
1596
1597 if (debug > 1)
1598 printk(KERN_WARNING "%s: rhine_restart_tx() "
1599 "Another error occured %8.8x.\n",
1600 dev->name, intr_status);
1601 }
1602
1603}
1604
/*
 * Handle "abnormal" interrupt causes: link changes, tally counter
 * overflow, Tx aborts/underruns/descriptor races and PCI errors.
 * Runs in interrupt context under rp->lock.
 */
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		/* Hardware tally counters near overflow: fold them into the
		 * software stats and reset the hardware counters. */
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		/* Raise the Tx FIFO threshold in 0x20 steps, capped at 0xE0. */
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	/* Unspecified Tx error (no more precise cause bit set): raise the
	 * threshold here as well. */
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	/* Any Tx trouble: restart the transmit engine. */
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	/* Anything not handled above is unexpected. */
	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}
1662
/*
 * Return interface statistics after folding the chip's hardware tally
 * counters (CRC errors, missed frames) into rp->stats. Uses irqsave
 * locking since this runs in process context while the interrupt
 * handler also updates the same counters.
 */
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}
1677
/*
 * Program the chip's Rx filter from dev->flags and the multicast list:
 * promiscuous, accept-all-multicast (also used when the list exceeds
 * multicast_filter_limit), or a 64-bit CRC hash filter. The chosen
 * rx_mode bits are merged with rp->rx_thresh into RxConfig.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* multicast hash filter halves */
	u8 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, open the hash filter. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many addresses to hash, or all-multicast requested. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Top 6 bits of the address CRC pick the hash bit. */
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
1711
1712static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1713{
1714 struct rhine_private *rp = netdev_priv(dev);
1715
1716 strcpy(info->driver, DRV_NAME);
1717 strcpy(info->version, DRV_VERSION);
1718 strcpy(info->bus_info, pci_name(rp->pdev));
1719}
1720
1721static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1722{
1723 struct rhine_private *rp = netdev_priv(dev);
1724 int rc;
1725
1726 spin_lock_irq(&rp->lock);
1727 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1728 spin_unlock_irq(&rp->lock);
1729
1730 return rc;
1731}
1732
1733static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1734{
1735 struct rhine_private *rp = netdev_priv(dev);
1736 int rc;
1737
1738 spin_lock_irq(&rp->lock);
1739 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1740 spin_unlock_irq(&rp->lock);
1741 rhine_set_carrier(&rp->mii_if);
1742
1743 return rc;
1744}
1745
1746static int netdev_nway_reset(struct net_device *dev)
1747{
1748 struct rhine_private *rp = netdev_priv(dev);
1749
1750 return mii_nway_restart(&rp->mii_if);
1751}
1752
1753static u32 netdev_get_link(struct net_device *dev)
1754{
1755 struct rhine_private *rp = netdev_priv(dev);
1756
1757 return mii_link_ok(&rp->mii_if);
1758}
1759
/* ethtool: the module-wide 'debug' parameter doubles as the msglevel. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1764
/* ethtool: setting msglevel writes the shared 'debug' parameter
 * (affects all devices driven by this module). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1769
1770static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1771{
1772 struct rhine_private *rp = netdev_priv(dev);
1773
1774 if (!(rp->quirks & rqWOL))
1775 return;
1776
1777 spin_lock_irq(&rp->lock);
1778 wol->supported = WAKE_PHY | WAKE_MAGIC |
1779 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1780 wol->wolopts = rp->wolopts;
1781 spin_unlock_irq(&rp->lock);
1782}
1783
1784static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1785{
1786 struct rhine_private *rp = netdev_priv(dev);
1787 u32 support = WAKE_PHY | WAKE_MAGIC |
1788 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1789
1790 if (!(rp->quirks & rqWOL))
1791 return -EINVAL;
1792
1793 if (wol->wolopts & ~support)
1794 return -EINVAL;
1795
1796 spin_lock_irq(&rp->lock);
1797 rp->wolopts = wol->wolopts;
1798 spin_unlock_irq(&rp->lock);
1799
1800 return 0;
1801}
1802
/* ethtool operations implemented by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};
1814
1815static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1816{
1817 struct rhine_private *rp = netdev_priv(dev);
1818 int rc;
1819
1820 if (!netif_running(dev))
1821 return -EINVAL;
1822
1823 spin_lock_irq(&rp->lock);
1824 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1825 spin_unlock_irq(&rp->lock);
1826 rhine_set_carrier(&rp->mii_if);
1827
1828 return rc;
1829}
1830
1831static int rhine_close(struct net_device *dev)
1832{
1833 struct rhine_private *rp = netdev_priv(dev);
1834 void __iomem *ioaddr = rp->base;
1835
1836 spin_lock_irq(&rp->lock);
1837
1838 netif_stop_queue(dev);
1839#ifdef CONFIG_VIA_RHINE_NAPI
1840 napi_disable(&rp->napi);
1841#endif
1842
1843 if (debug > 1)
1844 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1845 "status was %4.4x.\n",
1846 dev->name, ioread16(ioaddr + ChipCmd));
1847
1848
1849 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1850
1851
1852 iowrite16(0x0000, ioaddr + IntrEnable);
1853
1854
1855 iowrite16(CmdStop, ioaddr + ChipCmd);
1856
1857 spin_unlock_irq(&rp->lock);
1858
1859 free_irq(rp->pdev->irq, dev);
1860 free_rbufs(dev);
1861 free_tbufs(dev);
1862 free_ring(dev);
1863
1864 return 0;
1865}
1866
1867
/* PCI hot-remove: unregister the netdev, then release all PCI
 * resources (mappings, regions) and free the device structure. */
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1882
/*
 * Prepare the chip for system shutdown: arm the configured
 * Wake-on-LAN events and, unless avoid_D3 is set (broken BIOSes),
 * request the sticky low-power state. No-op on non-WOL chips.
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* nothing to arm on this chip */

	rhine_power_init(dev);

	/* NOTE(review): register 0xA7 write appears to select the WOL
	 * pattern set on 6-pattern chips - confirm against datasheet. */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/* NOTE(review): ConfigA |= 0x03 presumably re-enables
		 * EEPROM-controlled wake-up - confirm against datasheet. */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Any WOL event armed: set the power config / sticky bits. */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Request the sticky D3 state unless a broken BIOS forbids it. */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

}
1929
1930#ifdef CONFIG_PM
/*
 * PM suspend: quiesce NAPI, detach the interface, save PCI config
 * state, power the chip down via rhine_shutdown() and release the IRQ.
 */
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;	/* interface down: nothing to quiesce */

#ifdef CONFIG_VIA_RHINE_NAPI
	/* Disable NAPI before taking rp->lock (napi_disable may sleep). */
	napi_disable(&rp->napi);
#endif
	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
1953
/*
 * PM resume: re-request the IRQ, restore power/PCI state, rebuild the
 * Tx/Rx rings from scratch, reprogram the chip and reattach the
 * interface. Mirrors rhine_suspend().
 */
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* IRQ was released in rhine_suspend(); failure is logged but
	 * resume continues (device would just stay interrupt-less). */
	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
			dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	/* Old ring contents are stale after suspend: free and rebuild. */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
1990#endif
1991
/* PCI driver glue: probe/remove, optional PM hooks, and shutdown. */
static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif
	.shutdown	= rhine_shutdown,
};
2003
/*
 * Systems with a BIOS known to mishandle the chip's D3 power state;
 * rhine_init() forces avoid_D3 when one of these DMI entries matches.
 */
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }	/* terminator */
};
2021
/*
 * Module init: force avoid_D3 on systems with a known-broken BIOS
 * (DMI match), then register the PCI driver.
 */
static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* Broken BIOS: override any user-supplied avoid_D3=0. */
		avoid_D3 = 1;
		printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
				    "enabled.\n",
		       DRV_NAME);
	}
	else if (avoid_D3)
		printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);

	return pci_register_driver(&rhine_driver);
}
2040
2041
/* Module exit: unregister the PCI driver (triggers remove for all devices). */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}
2046
2047
2048module_init(rhine_init);
2049module_exit(rhine_cleanup);
2050