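/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Driver for the Winbond W89c840 PCI Ethernet adapter and compatibles
	(such as the Compex RL100-ATX), originally written by Donald Becker.
	The chip is a Tulip-style descriptor-based design; see tulip.h for
	the shared descriptor and interrupt bit definitions.
*/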
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"
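/* Tunable parameters, settable as module options.
   debug: message verbosity level, 0 (none) to 6 (all).
   max_interrupt_work: maximum events handled per interrupt. */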
static int debug = 1;
static int max_interrupt_work = 20;
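/* Maximum number of multicast addresses to filter before switching the
   chip to receive-all-multicast mode (see __set_rx_mode()). */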
static int multicast_filter_limit = 32;
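/* Received packets shorter than this are copied into a freshly allocated
   skb rather than handing up the full-sized receive buffer; 0 disables
   copying. */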
static int rx_copybreak;
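/* Per-adapter option and forced-full-duplex settings; -1 means "not set".
   See the MODULE_PARM_DESC strings below for the option bit layout. */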
#define MAX_UNITS 8
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
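/* Operational parameters that are set at compile time. */

/* Limit on the number of Tx descriptors in use at once; the queue is
   restarted once it drains below TX_QUEUE_LEN_RESTART (see start_tx()
   and netdev_tx_done()). */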
#define TX_QUEUE_LEN 10
#define TX_QUEUE_LEN_RESTART 5

#define TX_BUFLIMIT (1024-128)
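/* Chips with the HasBrokenTx flag apparently cannot safely queue more
   than about one Tx FIFO's worth of outstanding bytes, so the queue is
   additionally throttled by TX_BUG_FIFO_LIMIT (see start_tx()). */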
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
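/* Time in jiffies before concluding that the transmitter is hung. */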
#define TX_TIMEOUT (2*HZ)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ
#define PKT_BUF_SZ 1536

static const char version[] __initconst =
	"v" DRV_VERSION " (2.4 port) "
	DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
	" http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
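/* Per-board capability flags, kept in pci_id_tbl[].drv_flags. */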
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size = 128,
};

struct pci_id_info {
	const char *name;
	int drv_flags;
};

static const struct pci_id_info pci_id_tbl[] = {
	{ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII },
	{ "Winbond W89c840", CanHaveMII | HasBrokenTx },
	{ "Compex RL100-ATX", CanHaveMII | HasBrokenTx },
	{ }
};
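/* Offsets to the chip's command and status registers, accessed through
   the mapped PCI I/O region. */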
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn=0x00000,
};
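/* The Winbond Rx and Tx buffer descriptors. The layout follows the Tulip
   convention, with the ownership flag in the sign bit of status. */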
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT 1	/* at most one MII transceiver is used */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* The saved addresses of sent-in-place packets, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values, kept adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;			/* The Tx queue is full. */
	int mii_cnt;
	unsigned char phys[MII_CNT];		/* MII device addresses */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
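/* Probe one device: map the registers, read the station address from the
   EEPROM, apply any user options and register the net device. */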
static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i)
		return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	/* The station address is held in the first three EEPROM words. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase any previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}
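/* Serial EEPROM access: a bit-banged interface clocked through the EECtrl
   register. Reads of the register are used only as a delay. */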
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in. */
	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
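/* MII transceiver control: the management interface is bit-banged through
   the MIICtrl register, one bit per clock. */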
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set if the MII transceiver requires a preamble before each management
   transaction; apparently only older transceivers need it, so it is
   simply left enabled. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the MII preamble: establish sync by transmitting at least
   32 logic ones. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	/* Track the advertised capabilities (register 4 is MII_ADVERTISE). */
	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}
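/* Open the interface: reset the chip, install the interrupt handler,
   allocate and initialize the descriptor rings, then start the chip and
   the media-check timer. */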
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

	i = alloc_ringdesc(dev);
	if (i)
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* Read the link status; 0xffff means no PHY is responding. */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;

	/* Reread: the link status bit is sticky. */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* The Davicom PHY apparently does not report the result of
		   parallel detection in the LPA register, so read the
		   negotiated speed and duplex back from the BMCR instead. */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;

		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* Remove the fast-ethernet and full-duplex bits, then set them anew. */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT 2000
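/* Change csr6 (the operation mode register). Callers hold np->lock; the
   Rx and Tx processes are stopped and polled idle before the new value
   is written. */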
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new == np->csr6)
		return;
	/* Stop both Rx and Tx processes. */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* Wait until they have really stopped. */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t == 0 || t == 1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t == 0 || t == 1)
				break;	/* tx stopped */
		}

		limit--;
		if (!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* Restart Rx and Tx with the new configuration. */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}
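/* Initialize the Rx and Tx rings. The Tx ring shares the DMA allocation
   made in alloc_ringdesc() and sits directly after the Rx ring. */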
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc *)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Assemble the csr0 (PCIBusCfg) value. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds; the values
	   below appear to select the cache alignment and burst length. */
#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold; Transmit on; Receive on. */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr + CurTxDescAddr));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/* Recover from a hung transmitter by resetting the chip and
	   rebuilding the rings and registers from scratch. */
	iowrite32(1, np->base_addr + PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev);
	np->stats.tx_errors++;
}

static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* The Rx and Tx rings share one DMA-coherent allocation. */
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if (!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}
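/* Queue a packet for transmission, splitting it across the descriptor's
   two buffer pointers when it exceeds TX_BUFLIMIT. */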
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field with
	   the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if (entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* np->lock must be held to keep cur_tx/dirty_tx consistent with
	   the interrupt handler's netdev_tx_done(). */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around the apparent chip bug by also stopping the queue when
	   there is no FIFO room left for a maximum-sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
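/* Reclaim completed Tx descriptors and update the error statistics.
   Called under np->lock from the interrupt handler. */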
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;			/* still owned by the chip */
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, wake the queue. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}
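/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */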
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary / uncommon events handler. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Mask all but the abnormal and timer interrupts and
			   arm the GP timer to re-enable the rest shortly. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
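/* Receive packets: pass completed frames up the stack and refill the Rx
   ring with fresh buffers. Logically part of the interrupt handler. */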
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, "  netdev_rx() status was %08x\n",
				   status);
		if (status < 0)
			break;		/* still owned by the chip */
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers of an oversized frame. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++;
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev, np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version
			/* Dump the packet header for initial debugging. */
			if (debug > 5)
				netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							    skb->data,
							    np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}
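/* Handle abnormal interrupts: Tx FIFO underflow (raise the Tx start
   threshold), dead receiver, and the deferred-work timer armed when an
   interrupt had to be cut short. */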
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;

#if 0
		/* Simply increase the threshold one step at a time. */
		new = np->csr6 + 0x4000;
#else
		/* Double the Tx start threshold; clamp at 127, which appears
		   to mean "load the full packet before starting". */
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127;
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable the other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only reports silently dropped frames; everything else is
	   accumulated as the events occur. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}
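/* Compute the Rx mode bits for csr6 and program the 64-bit multicast hash
   filter. Returns the mode bits; the caller writes csr6. */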
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Accept everything. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);

	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes and disable interrupts. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}
}

#ifdef CONFIG_PM
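/* Power management. Suspend detaches the device, stops the chip and frees
   the Rx/Tx buffers; resume resets and reinitializes it. The rtnl lock
   serializes these paths against open/close, and netif_device_present()
   is checked elsewhere so nothing touches the hardware while it is down. */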
static int w840_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running(dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* No more hardware accesses behind this line. */
		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int w840_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;		/* device was not suspended */
	if (netif_running(dev)) {
		retval = pci_enable_device(pdev);
		if (retval) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr + PCIBusCfg);	/* Reset */
		ioread32(np->base_addr + PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
#endif /* CONFIG_PM */

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};

static int __init w840_init(void)
{
	printk(version);
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);