/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written by Donald Becker <becker@scyld.com>.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.

	Support and updates available at
	http://www.scyld.com/network/drivers.html
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* Message verbosity: 0 quiet .. 6 verbose. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Frames shorter than this are copied into a freshly allocated skb;
   larger frames are passed up in the original receive buffer. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   The media type is usually passed in 'options[]'; see the
   MODULE_PARM_DESC() entries below for the bit layout. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* The Tx queue is limited both in ring entries actually used and in the
   number of bytes queued to the chip (see TX_BUG_FIFO_LIMIT below). */
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART	5

/* Transmit buffers are limited to 1KB on the Winbond, so a full-sized
   packet must be split across both data buffers of a Tx descriptor
   (see start_tx()). */
#define TX_BUFLIMIT	(1024-128)

/* Work around a Tx FIFO bug on boards flagged HasBrokenTx: never queue
   more bytes than fit into the 2kB data FIFO alongside one
   maximum-sized frame. */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ		/* tulip.h also defines this */
#define PKT_BUF_SZ  1536

static const char version[] __initconst =
	"v" DRV_VERSION " (2.4 port) "
	DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
	"  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

This driver is for the Winbond w89c840 chip.

The chip is nearly register compatible with the Digital 21*4* "Tulip"
family: the low registers and the descriptor format are almost identical
(see tulip.h).  A significant difference is that the multicast filter
and station address are stored in registers rather than loaded through
a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB, so a full-sized
packet must use both data buffers of a descriptor.  The second buffer
address is therefore needed for data, and the driver runs the rings in
ring mode, marking the last descriptor with DescEndRing instead of
chaining descriptors through buffer2.

*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size = 128,	/* size of PCI BAR resource */
};

struct pci_id_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII },
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx },
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx },
	{ }	/* terminate list. */
};
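
/* The driver_data member of each w840_pci_tbl[] entry above is an index
   into pci_id_tbl[]; w840_probe1() uses it to look up the board name and
   capability flags for the matched device. */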

/* This driver was written to use PCI memory space: the tulip-style
   registers are accessed with ioread32/iowrite32 on a pci_iomap()
   mapping of TULIP_BAR. */

/* Offsets to the chip registers. */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,	/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Winbond Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT 1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII devices found. */
	unsigned char phys[MII_CNT];	/* MII device addresses, only the first used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};
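
/* Note on the ring layout: alloc_ringdesc() makes one coherent DMA
   allocation holding RX_RING_SIZE Rx descriptors immediately followed by
   TX_RING_SIZE Tx descriptors.  init_rxtx_rings() points tx_ring just
   past the Rx descriptors and programs both ring base registers from
   ring_dma_addr. */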

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu_rh74	= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These
   are serial bit streams generated by the host processor wiggling control
   lines in the EECtrl/MIICtrl register. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may
   need a delay. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
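
/* eeprom_read() below bit-bangs a read transaction in the style of a
   93c46-type serial EEPROM: assert chip select, clock out the read
   command (opcode plus word address, 11 clocks) MSB first with a rising
   EE_ShiftClk edge per bit, then clock in 16 data bits from EE_DataIn.
   The dummy ioread32() in eeprom_delay() flushes the posted write and
   paces the clock. */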
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is
	usually met by back-to-back 33MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   Preamble is only needed by older transceivers, so the extra code size
   of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
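
/* MDIO framing, roughly per IEEE 802.3 clause 22: an optional preamble
   of 32 ones, then start/opcode/PHY-address/register bits, a turnaround,
   and 16 data bits.  mdio_read() clocks out its 16 command bits,
   (0xf6 << 10) | (phy_id << 5) | location, one bit per MDIO_ShiftClk
   cycle and clocks the result back in; mdio_write() sends a full 32-bit
   frame including the data word. */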

/* Establish sync by sending at least 32 logic ones. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}
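
/* update_link() below translates the PHY's reported state into the two
   NetworkConfig (csr6) bits this driver cares about: 0x200 selects full
   duplex and 0x20000000 selects 100Mbit.  It returns the csr6 value to
   program; callers pass the result to update_csr6(). */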

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with "parallel detection".
		 * Some MIIs update the LPA register to the result of the
		 * parallel detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bit 9 is invalid - both link partners must support
		 * autonegotiation, or this one will be the duplex setting.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fast ethernet and full duplex, then rebuild them */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Rx and Tx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx not running */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}
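
/* Per the tulip-style CSR6 layout, the 0x2002 mask cleared above covers
   the Rx-start (0x2) and Tx-start (0x2000) bits of NetworkConfig.  The
   hardware wants both DMA engines idle before the rest of csr6 is
   rewritten, which is why update_csr6() polls the process-state fields
   in IntrStatus before programming the new value. */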

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);

}
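
/* Descriptor ownership protocol: setting DescOwned in a descriptor's
   status word hands it to the chip, which clears the bit on completion.
   The (i - RX_RING_SIZE) wrap-around assignment to dirty_rx records how
   many Rx buffers failed to allocate, so the refill loop in netdev_rx()
   can retry them later. */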

static void free_rxtx_rings(struct netdev_private* np)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */
#if defined (__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev);
	np->stats.tx_errors++;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
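
/* Note on the length word above: the sizes of the two data buffers are
   packed into one 32-bit field, buffer1's size in the low 11 bits and
   buffer2's size shifted left by 11, which is why an oversized packet is
   split as TX_BUFLIMIT | (len << 11). */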

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}
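
/* The (tx_status < 0) test above relies on the status word being
   declared s32: DescOwned is bit 31, so a negative status means the
   chip still owns the descriptor and reclaiming must stop there. */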

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts
			   after a delay. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
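
/* Interrupt mitigation: when the work limit is hit, every source except
   AbnormalIntr and TimerInt is masked and the general-purpose timer is
   armed; the TimerInt path in netdev_error() then rewrites IntrEnable
   with the full mask (0x1A0F5), restoring normal interrupts once the
   storm has passed. */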

/* This routine is logically part of the interrupt handler, but separated
   for clarity. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, "  netdev_rx() status was %08x\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++;
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, "  netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				netdev_dbg(dev, "  Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							    skb->data,
							    np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}
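
/* rx_copybreak trade-off: small packets are copied into a fresh skb so
   the full-sized receive buffer can stay mapped and be reused at once,
   while large packets are passed up in place and the ring slot is
   refilled with a newly allocated buffer by the loop above. */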

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;

#if 0
		/* Bump up the Tx threshold */
		new = np->csr6 + 0x4000;
#else
		/* Increase the Tx threshold, doubling up to the maximum. */
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The hardware only counts frames it silently dropped (RxMissed). */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}


static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}
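
/* The hash above takes the top six bits of the Ethernet CRC of each
   multicast address (inverted by the ^ 0x3F), giving an index into a
   64-bit filter kept in the two 32-bit registers MulticastFilter0/1:
   bit 5 of the index selects the register, the low five bits select the
   bit within it. */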

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG"  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - start_xmit:
 *	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list:
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler:
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6() and all functions that write IntrEnable check
 * netif_device_present() before setting any bits.
 *
 * Detach must occur under spin_unlock_irq(); interrupts from a detached
 * device would be Oops'able.
 */
static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
#endif

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};

static int __init w840_init(void)
{
	printk(version);
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);