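/*
 * winbond-840.c: Winbond W89c840 PCI Ethernet driver.
 *
 * Written by Donald Becker <becker@scyld.com>; see the version string and
 * MODULE_* declarations below.  Driver updates were originally published
 * at http://www.scyld.com/network/drivers.html.
 */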
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "winbond-840"
#define DRV_VERSION     "1.01-e"
#define DRV_RELDATE     "Sep-11-2006"
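
/* The user-configurable values below may be overridden when the driver
   is loaded as a module; see the module_param() declarations further
   down for the exact parameter names. */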
static int debug = 1;                   /* debug level (0-6); also the ethtool msglevel */
static int max_interrupt_work = 20;     /* maximum events handled per interrupt */

/* Maximum number of multicast addresses to filter individually;
   above this limit the hash filter simply accepts all multicasts. */
static int multicast_filter_limit = 32;

/* Copy breakpoint for the copy-only-tiny-frames receive path: frames
   shorter than this are copied into a freshly allocated skb, longer
   frames are passed up in the original receive buffer. */
static int rx_copybreak;

/* Per-board media options and forced full duplex. */
#define MAX_UNITS 8             /* limit set only by these option arrays */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are not usually changed. */

/* Keep the Tx ring only partly populated; the queue is restarted once it
   drains below TX_QUEUE_LEN_RESTART (see start_tx()/netdev_tx_done()). */
#define TX_QUEUE_LEN            10
#define TX_QUEUE_LEN_RESTART    5

/* Split point for packets that span both Tx descriptor buffers. */
#define TX_BUFLIMIT             (1024-128)

/* Work around the Tx FIFO bug (HasBrokenTx) by never queueing more than
   TX_FIFO_SIZE minus one maximum frame (1514) minus slack (16) bytes,
   i.e. 518 bytes. */
#define TX_FIFO_SIZE            (2048)
#define TX_BUG_FIFO_LIMIT       (TX_FIFO_SIZE-1514-16)

/* Time in jiffies before concluding that the transmitter is hung. */
#define TX_TIMEOUT              (2*HZ)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>      /* processor type, for cache alignment */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ               /* tulip.h also defines this */
#define PKT_BUF_SZ      1536    /* size of each temporary Rx buffer */

static const char version[] __initconst =
        "v" DRV_VERSION " (2.4 port) "
        DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
        " http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
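
/* A hypothetical load-time example (the values are illustrative only):
 *
 *      modprobe winbond-840 debug=3 full_duplex=1,1
 *
 * raises the message level and forces full duplex on the first two
 * cards, using the module parameters declared above.
 */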
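
/* The driver_data field of w840_pci_tbl below is an index into
   pci_id_tbl, which carries each board's name and its
   chip_capability_flags. */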
enum chip_capability_flags {
        CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
        { 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
        { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
        { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
        { }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
        netdev_res_size = 128,  /* size of the mapped register window */
};

struct pci_id_info {
        const char *name;
        int drv_flags;          /* chip_capability_flags */
};

static const struct pci_id_info pci_id_tbl[] = {
        { "Winbond W89c840",    CanHaveMII | HasBrokenTx | FDXOnNoMII},
        { "Winbond W89c840",    CanHaveMII | HasBrokenTx},
        { "Compex RL100-ATX",   CanHaveMII | HasBrokenTx},
        { }     /* terminate list */
};
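
/* Offsets to the chip's command and status registers, accessed through
   the region mapped with pci_iomap() in w840_probe1().  Note that EECtrl
   and MIICtrl share the same register. */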
enum w840_offsets {
        PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
        RxRingPtr=0x0C, TxRingPtr=0x10,
        IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
        RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
        CurRxDescAddr=0x30, CurRxBufAddr=0x34,
        MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
        CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Receive-mode bits OR'ed into the NetworkConfig (csr6) register by
   __set_rx_mode(). */
enum rx_mode_bits {
        AcceptErr=0x80,
        RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
        RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
        MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
        MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};
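
/* The Winbond Rx and Tx buffer descriptors follow the Tulip-style layout
   (see tulip.h): a status word whose top bit is the ownership flag
   (DescOwned; a negative s32 status therefore means "owned by the
   chip"), a length/control word, and two buffer pointers. */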
struct w840_rx_desc {
        s32 status;
        s32 length;
        u32 buffer1;
        u32 buffer2;
};

struct w840_tx_desc {
        s32 status;
        s32 length;
        u32 buffer1, buffer2;
};

#define MII_CNT 1       /* this driver uses only the first (active) transceiver */
struct netdev_private {
        struct w840_rx_desc *rx_ring;
        dma_addr_t rx_addr[RX_RING_SIZE];
        struct w840_tx_desc *tx_ring;
        dma_addr_t tx_addr[TX_RING_SIZE];
        dma_addr_t ring_dma_addr;
        /* The addresses of receive-in-place skbuffs. */
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        /* The saved addresses of sent-in-place packets, for later free(). */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        struct net_device_stats stats;
        struct timer_list timer;        /* media monitoring timer */
        /* Frequently used values, kept adjacent for cache effect. */
        spinlock_t lock;
        int chip_id, drv_flags;
        struct pci_dev *pci_dev;
        int csr6;                       /* current NetworkConfig value */
        struct w840_rx_desc *rx_head_desc;
        unsigned int cur_rx, dirty_rx;  /* producer/consumer Rx ring indices */
        unsigned int rx_buf_sz;         /* based on MTU+slack */
        unsigned int cur_tx, dirty_tx;
        unsigned int tx_q_bytes;        /* bytes queued, for the FIFO bug limit */
        unsigned int tx_full;           /* the Tx queue is full */
        /* MII transceiver section. */
        int mii_cnt;                    /* MII device addresses found */
        unsigned char phys[MII_CNT];    /* MII device addresses, only the first used */
        u32 mii;
        struct mii_if_info mii_if;
        void __iomem *base_addr;
};

static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int find_cnt;
        int chip_idx = ent->driver_data;
        int irq;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        void __iomem *ioaddr;

        i = pci_enable_device(pdev);
        if (i)
                return i;

        pci_set_master(pdev);

        irq = pdev->irq;

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pr_warn("Device %s disabled due to DMA limitations\n",
                        pci_name(pdev));
                return -EIO;
        }
        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;

        ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
        if (!ioaddr)
                goto err_out_free_res;

        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

        /* Reset the chip to erase any previous misconfiguration. */
        iowrite32(0x00000001, ioaddr + PCIBusCfg);

        np = netdev_priv(dev);
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
        spin_lock_init(&np->lock);
        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->base_addr = ioaddr;

        pci_set_drvdata(pdev, dev);

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->mii_if.full_duplex = 1;
                if (option & 15)
                        dev_info(&dev->dev,
                                 "ignoring user supplied media type %d",
                                 option & 15);
        }
        if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;

        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        i = register_netdev(dev);
        if (i)
                goto err_out_cleardev;

        dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
                 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

        if (np->drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0xffff && mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
                                                mdio_read(dev, phy, MII_PHYSID2);
                                dev_info(&dev->dev,
                                         "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
                                         np->mii, phy, mii_status,
                                         np->mii_if.advertising);
                        }
                }
                np->mii_cnt = phy_idx;
                np->mii_if.phy_id = np->phys[0];
                if (phy_idx == 0) {
                        dev_warn(&dev->dev,
                                 "MII PHY not found -- this device may not operate correctly\n");
                }
        }

        find_cnt++;
        return 0;

err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_netdev:
        free_netdev(dev);
        return -ENODEV;
}
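
/* Serial EEPROM section.
   The EEPROM is read one bit at a time by toggling the chip-select,
   clock and data bits in the EECtrl register.  eeprom_delay() reads the
   register back, which flushes the preceding PCI write and paces the
   bit-banging. */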
#define eeprom_delay(ee_addr)   ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
        EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
        EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
        EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(void __iomem *addr, int location)
{
        int i;
        int retval = 0;
        void __iomem *ee_addr = addr + EECtrl;
        int read_cmd = location | EE_ReadCmd;

        iowrite32(EE_ChipSelect, ee_addr);

        /* Shift the read command bits out. */
        for (i = 10; i >= 0; i--) {
                short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
                iowrite32(dataval, ee_addr);
                eeprom_delay(ee_addr);
                iowrite32(dataval | EE_ShiftClk, ee_addr);
                eeprom_delay(ee_addr);
        }
        iowrite32(EE_ChipSelect, ee_addr);
        eeprom_delay(ee_addr);

        /* Clock in the 16 data bits, MSB first. */
        for (i = 16; i > 0; i--) {
                iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
                eeprom_delay(ee_addr);
                retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
                iowrite32(EE_ChipSelect, ee_addr);
                eeprom_delay(ee_addr);
        }

        /* Terminate the EEPROM access. */
        iowrite32(0, ee_addr);
        return retval;
}
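
/* MII transceiver control section.
   The MII registers are read and written with a software-generated
   serial MDIO protocol on the MIICtrl register; mdio_delay() reads the
   register back to pace the clock. */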
#define mdio_delay(mdio_addr)   ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires the mdio preamble.
   When set, mdio_sync() sends 32 logic ones before every MII frame. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the MDIO preamble: establish synchronization by sending at
   least 32 logic ones. */
static void mdio_sync(void __iomem *mdio_addr)
{
        int bits = 32;

        while (--bits >= 0) {
                iowrite32(MDIO_WRITE1, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base_addr + MIICtrl;
        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
        int i, retval = 0;

        if (mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the read command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite32(dataval, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        /* Read the two transition bits, the 16 data bits, and trailing
           wire-idle bits; the final shift discards the last idle bit. */
        for (i = 20; i > 0; i--) {
                iowrite32(MDIO_EnbIn, mdio_addr);
                mdio_delay(mdio_addr);
                retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
                iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base_addr + MIICtrl;
        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
        int i;

        if (location == 4 && phy_id == np->phys[0])
                np->mii_if.advertising = value;

        if (mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the command bits out. */
        for (i = 31; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite32(dataval, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
                iowrite32(MDIO_EnbIn, mdio_addr);
                mdio_delay(mdio_addr);
                iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay(mdio_addr);
        }
}

static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        const int irq = np->pci_dev->irq;
        int i;

        iowrite32(0x00000001, ioaddr + PCIBusCfg);      /* Reset */

        netif_device_detach(dev);
        i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                goto out_err;

        if (debug > 1)
                netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

        i = alloc_ringdesc(dev);
        if (i)
                goto out_err;

        spin_lock_irq(&np->lock);
        netif_device_attach(dev);
        init_registers(dev);
        spin_unlock_irq(&np->lock);

        netif_start_queue(dev);
        if (debug > 2)
                netdev_dbg(dev, "Done netdev_open()\n");

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + 1*HZ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = netdev_timer;
        add_timer(&np->timer);
        return 0;
out_err:
        netif_device_attach(dev);
        return i;
}

#define MII_DAVICOM_DM9101      0x0181b800

static int update_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int duplex, fasteth, result, mii_reg;

        /* BMSR */
        mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

        if (mii_reg == 0xffff)
                return np->csr6;
        /* reread: the link status bit is sticky */
        mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
        if (!(mii_reg & 0x4)) {
                if (netif_carrier_ok(dev)) {
                        if (debug)
                                dev_info(&dev->dev,
                                         "MII #%d reports no link. Disabling watchdog\n",
                                         np->phys[0]);
                        netif_carrier_off(dev);
                }
                return np->csr6;
        }
        if (!netif_carrier_ok(dev)) {
                if (debug)
                        dev_info(&dev->dev,
                                 "MII #%d link is back. Enabling watchdog\n",
                                 np->phys[0]);
                netif_carrier_on(dev);
        }

        if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
                /* If the link partner doesn't support autonegotiation,
                 * the MII detects its abilities with "parallel detection".
                 * Some MIIs update the LPA register with the result of the
                 * parallel detection, but the Davicom PHY reports the
                 * result only in the duplex bit of the control register,
                 * so read back BMCR here instead of LPA.
                 */
                mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
                duplex = mii_reg & BMCR_FULLDPLX;
                fasteth = mii_reg & BMCR_SPEED100;
        } else {
                int negotiated;

                mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
                negotiated = mii_reg & np->mii_if.advertising;

                duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
                fasteth = negotiated & 0x380;
        }
        duplex |= np->mii_if.force_media;
        /* remove fastether and fullduplex */
        result = np->csr6 & ~0x20000200;
        if (duplex)
                result |= 0x200;
        if (fasteth)
                result |= 0x20000000;
        if (result != np->csr6 && debug)
                dev_info(&dev->dev,
                         "Setting %dMBit-%s-duplex based on MII#%d\n",
                         fasteth ? 100 : 10, duplex ? "full" : "half",
                         np->phys[0]);
        return result;
}

#define RXTX_TIMEOUT    2000
static inline void update_csr6(struct net_device *dev, int new)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int limit = RXTX_TIMEOUT;

        if (!netif_device_present(dev))
                new = 0;
        if (new == np->csr6)
                return;
        /* stop both Rx and Tx processes */
        iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
        /* wait until both state machines have really stopped */
        for (;;) {
                int csr5 = ioread32(ioaddr + IntrStatus);
                int t;

                t = (csr5 >> 17) & 0x07;
                if (t == 0 || t == 1) {
                        /* rx stopped */
                        t = (csr5 >> 20) & 0x07;
                        if (t == 0 || t == 1)
                                break;  /* tx stopped */
                }

                limit--;
                if (!limit) {
                        dev_info(&dev->dev,
                                 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
                        break;
                }
                udelay(1);
        }
        np->csr6 = new;
        /* and restart them with the new configuration */
        iowrite32(np->csr6, ioaddr + NetworkConfig);
        if (new & 0x200)
                np->mii_if.full_duplex = 1;
}

static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;

        if (debug > 2)
                netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
                           ioread32(ioaddr + IntrStatus),
                           ioread32(ioaddr + NetworkConfig));
        spin_lock_irq(&np->lock);
        update_csr6(dev, update_link(dev));
        spin_unlock_irq(&np->lock);
        np->timer.expires = jiffies + 10*HZ;
        add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->rx_head_desc = &np->rx_ring[0];
        np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

        /* Init the Rx ring entries. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].length = np->rx_buf_sz;
                np->rx_ring[i].status = 0;
                np->rx_skbuff[i] = NULL;
        }
        /* Mark the last entry as wrapping the ring. */
        np->rx_ring[i-1].length |= DescEndRing;

        /* Fill in the Rx buffers.  An allocation failure just leaves a
           hole; netdev_rx() retries the refill later. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                np->rx_addr[i] = pci_map_single(np->pci_dev, skb->data,
                                                np->rx_buf_sz, PCI_DMA_FROMDEVICE);

                np->rx_ring[i].buffer1 = np->rx_addr[i];
                np->rx_ring[i].status = DescOwned;
        }

        np->cur_rx = 0;
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Initialize the Tx descriptors. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = 0;
        }
        np->tx_full = 0;
        np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

        iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
        iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
                  np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
        int i;

        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].status = 0;
                if (np->rx_skbuff[i]) {
                        pci_unmap_single(np->pci_dev,
                                         np->rx_addr[i],
                                         np->rx_skbuff[i]->len,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                }
                np->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (np->tx_skbuff[i]) {
                        pci_unmap_single(np->pci_dev,
                                         np->tx_addr[i],
                                         np->tx_skbuff[i]->len,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb(np->tx_skbuff[i]);
                }
                np->tx_skbuff[i] = NULL;
        }
}

static void init_registers(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int i;

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

        /* Configure the PCI bus bursts and cache alignment in csr0
           (PCIBusCfg); the per-architecture values below follow the
           original driver's choices. */
#ifdef __BIG_ENDIAN
        i = (1<<20);    /* big-endian descriptors */
#else
        i = 0;
#endif
        i |= (0x04<<2);
        i |= 0x02;

#if defined (__i386__) && !defined(MODULE)
        /* When not a module we can work around broken '486 PCI boards. */
        if (boot_cpu_data.x86 <= 4) {
                i |= 0x4800;
                dev_info(&dev->dev,
                         "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
        } else {
                i |= 0xE000;
        }
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
        i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
        i |= 0x4800;
#else
        dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
        i |= 0x4800;
#endif
        iowrite32(i, ioaddr + PCIBusCfg);

        np->csr6 = 0;
        /* Enable transmitter and receiver (0x2002) with a moderate Tx
           threshold, plus the current link and receive-mode settings. */
        update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

        /* Clear and enable interrupts by setting the interrupt mask. */
        iowrite32(0x1A0F5, ioaddr + IntrStatus);
        iowrite32(0x1A0F5, ioaddr + IntrEnable);

        iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        const int irq = np->pci_dev->irq;

        dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
                 ioread32(ioaddr + IntrStatus));

        {
                int i;
                printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
                printk(KERN_CONT "\n");
                printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_CONT " %08x", np->tx_ring[i].status);
                printk(KERN_CONT "\n");
        }
        printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
               np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
        printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr + CurTxDescAddr));

        disable_irq(irq);
        spin_lock_irq(&np->lock);
        /*
         * Under high load dirty_tx and the chip's internal Tx descriptor
         * pointer can come out of sync, so perform a software reset and
         * reinitialize everything.
         */
        iowrite32(1, np->base_addr + PCIBusCfg);
        udelay(1);

        free_rxtx_rings(np);
        init_rxtx_rings(dev);
        init_registers(dev);
        spin_unlock_irq(&np->lock);
        enable_irq(irq);

        netif_wake_queue(dev);
        netif_trans_update(dev);
        np->stats.tx_errors++;
}

static int alloc_ringdesc(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

        /* The Rx and Tx rings live in one coherent allocation; the Tx
           ring starts right after the Rx ring (see init_rxtx_rings()). */
        np->rx_ring = pci_alloc_consistent(np->pci_dev,
                        sizeof(struct w840_rx_desc)*RX_RING_SIZE +
                        sizeof(struct w840_tx_desc)*TX_RING_SIZE,
                        &np->ring_dma_addr);
        if (!np->rx_ring)
                return -ENOMEM;
        init_rxtx_rings(dev);
        return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
        pci_free_consistent(np->pci_dev,
                        sizeof(struct w840_rx_desc)*RX_RING_SIZE +
                        sizeof(struct w840_tx_desc)*TX_RING_SIZE,
                        np->rx_ring, np->ring_dma_addr);
}
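
/* Queue a packet for transmission.  A Tx descriptor can address two
   buffers, so frames longer than TX_BUFLIMIT are split: buffer1 carries
   the first TX_BUFLIMIT bytes and buffer2 the remainder, with the second
   length packed into bits 21-11 of the length word. */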
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned entry;

        /* Caution: the write order is important here; set the descriptor
           word with the ownership bit last. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;

        np->tx_addr[entry] = pci_map_single(np->pci_dev,
                                skb->data, skb->len, PCI_DMA_TODEVICE);
        np->tx_skbuff[entry] = skb;

        np->tx_ring[entry].buffer1 = np->tx_addr[entry];
        if (skb->len < TX_BUFLIMIT) {
                np->tx_ring[entry].length = DescWholePkt | skb->len;
        } else {
                int len = skb->len - TX_BUFLIMIT;

                np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
                np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
        }
        if (entry == TX_RING_SIZE-1)
                np->tx_ring[entry].length |= DescEndRing;

        /* Take the irq spinlock before setting DescOwned: the interrupt
           handler must never see cur_tx and the ownership bit in an
           inconsistent state, or it could either treat the packet as
           already transmitted or ignore its completion. */
        spin_lock_irq(&np->lock);
        np->cur_tx++;

        wmb();                  /* flush length, buffer1, buffer2 */
        np->tx_ring[entry].status = DescOwned;
        wmb();                  /* flush status */
        iowrite32(0, np->base_addr + TxStartDemand);
        np->tx_q_bytes += skb->len;

        /* Work around the chip's Tx FIFO bug by stopping the queue when
           there is no longer FIFO room for a maximum-sized packet. */
        if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
            ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
                netif_stop_queue(dev);
                wmb();
                np->tx_full = 1;
        }
        spin_unlock_irq(&np->lock);

        if (debug > 4) {
                netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
                           np->cur_tx, entry);
        }
        return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                int tx_status = np->tx_ring[entry].status;

                if (tx_status < 0)      /* still owned by the chip */
                        break;
                if (tx_status & 0x8000) {       /* there was an error, log it */
#ifndef final_version
                        if (debug > 1)
                                netdev_dbg(dev, "Transmit error, Tx status %08x\n",
                                           tx_status);
#endif
                        np->stats.tx_errors++;
                        if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
                        if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
                        if (tx_status & 0x0200) np->stats.tx_window_errors++;
                        if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
                        if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
                                np->stats.tx_heartbeat_errors++;
                } else {
#ifndef final_version
                        if (debug > 3)
                                netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
                                           entry, tx_status);
#endif
                        np->stats.tx_bytes += np->tx_skbuff[entry]->len;
                        np->stats.collisions += (tx_status >> 3) & 15;
                        np->stats.tx_packets++;
                }
                /* Free the original skb. */
                pci_unmap_single(np->pci_dev, np->tx_addr[entry],
                                 np->tx_skbuff[entry]->len,
                                 PCI_DMA_TODEVICE);
                np->tx_q_bytes -= np->tx_skbuff[entry]->len;
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
        }
        if (np->tx_full &&
            np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
            np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
                /* The ring is no longer full. */
                np->tx_full = 0;
                wmb();
                netif_wake_queue(dev);
        }
}
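
/* The interrupt handler cleans up after the Tx thread and does all of
   the receive work, bounded by max_interrupt_work. */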
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        int work_limit = max_interrupt_work;
        int handled = 0;

        if (!netif_device_present(dev))
                return IRQ_NONE;
        do {
                u32 intr_status = ioread32(ioaddr + IntrStatus);

                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

                if (debug > 4)
                        netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

                if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
                        break;

                handled = 1;

                if (intr_status & (RxIntr | RxNoBuf))
                        netdev_rx(dev);
                if (intr_status & RxNoBuf)
                        iowrite32(0, ioaddr + RxStartDemand);

                if (intr_status & (TxNoBuf | TxIntr) &&
                    np->cur_tx != np->dirty_tx) {
                        spin_lock(&np->lock);
                        netdev_tx_done(dev);
                        spin_unlock(&np->lock);
                }

                /* Abnormal error summary/uncommon events are handled here. */
                if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
                                   TimerInt | TxDied))
                        netdev_error(dev, intr_status);

                if (--work_limit < 0) {
                        dev_warn(&dev->dev,
                                 "Too much work at interrupt, status=0x%04x\n",
                                 intr_status);
                        /* Mask everything but the abnormal and timer
                           interrupts and let the general-purpose timer
                           re-enable the rest shortly. */
                        spin_lock(&np->lock);
                        if (netif_device_present(dev)) {
                                iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
                                iowrite32(10, ioaddr + GPTimer);
                        }
                        spin_unlock(&np->lock);
                        break;
                }
        } while (1);

        if (debug > 3)
                netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
                           ioread32(ioaddr + IntrStatus));
        return IRQ_RETVAL(handled);
}
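
/* This routine is logically part of the interrupt handler, but separated
   for clarity.  It drains completed Rx descriptors and then refills the
   Rx ring. */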
static int netdev_rx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

        if (debug > 4) {
                netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
                           entry, np->rx_ring[entry].status);
        }

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (--work_limit >= 0) {
                struct w840_rx_desc *desc = np->rx_head_desc;
                s32 status = desc->status;

                if (debug > 4)
                        netdev_dbg(dev, " netdev_rx() status was %08x\n",
                                   status);
                if (status < 0)         /* still owned by the chip */
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        dev_warn(&dev->dev,
                                                 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
                                                 np->cur_rx, status);
                                        np->stats.rx_length_errors++;
                                }
                        } else if (status & 0x8000) {
                                /* There was a fatal error. */
                                if (debug > 2)
                                        netdev_dbg(dev, "Receive error, Rx status %08x\n",
                                                   status);
                                np->stats.rx_errors++;  /* end of a packet */
                                if (status & 0x0890) np->stats.rx_length_errors++;
                                if (status & 0x004C) np->stats.rx_frame_errors++;
                                if (status & 0x0002) np->stats.rx_crc_errors++;
                        }
                } else {
                        struct sk_buff *skb;
                        /* Omit the four octet CRC from the length. */
                        int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
                        if (debug > 4)
                                netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
                                           pkt_len, status);
#endif
                        /* Check if the packet is long enough to accept
                           without copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
                                                            np->rx_skbuff[entry]->len,
                                                            PCI_DMA_FROMDEVICE);
                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
                                                               np->rx_skbuff[entry]->len,
                                                               PCI_DMA_FROMDEVICE);
                        } else {
                                pci_unmap_single(np->pci_dev, np->rx_addr[entry],
                                                 np->rx_skbuff[entry]->len,
                                                 PCI_DMA_FROMDEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
#ifndef final_version
                        if (debug > 5)
                                netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
                                           &skb->data[0], &skb->data[6],
                                           skb->data[12], skb->data[13],
                                           &skb->data[14]);
#endif
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        np->stats.rx_packets++;
                        np->stats.rx_bytes += pkt_len;
                }
                entry = (++np->cur_rx) % RX_RING_SIZE;
                np->rx_head_desc = &np->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
                struct sk_buff *skb;
                entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;          /* better luck next round */
                        np->rx_addr[entry] = pci_map_single(np->pci_dev,
                                                            skb->data,
                                                            np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        np->rx_ring[entry].buffer1 = np->rx_addr[entry];
                }
                wmb();
                np->rx_ring[entry].status = DescOwned;
        }

        return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;

        if (debug > 2)
                netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
        if (intr_status == 0xffffffff)
                return;
        spin_lock(&np->lock);
        if (intr_status & TxFIFOUnderflow) {
                int new;
                /* Bump up the Tx threshold: double it on each underflow,
                   saturating at the store-and-forward maximum. */
#if 0
                new = np->csr6 + 0x4000;
#else
                new = (np->csr6 >> 14)&0x7f;
                if (new < 64)
                        new *= 2;
                else
                        new = 127;      /* load full packet before starting */
                new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
                netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
                update_csr6(dev, new);
        }
        if (intr_status & RxDied) {             /* the Rx process died */
                np->stats.rx_errors++;
        }
        if (intr_status & TimerInt) {
                /* Re-enable the other interrupts. */
                if (netif_device_present(dev))
                        iowrite32(0x1A0F5, ioaddr + IntrEnable);
        }
        np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
        iowrite32(0, ioaddr + RxStartDemand);
        spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;

        /* The chip only keeps the missed-frame count; all other
           statistics are maintained in np->stats by the driver. */
        spin_lock_irq(&np->lock);
        if (netif_running(dev) && netif_device_present(dev))
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
        spin_unlock_irq(&np->lock);

        return &np->stats;
}
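
/* Compute the receive-mode word and program the 64-bit multicast hash
   filter: bit (crc_of_address >> 26) ^ 0x3f of the table selects each
   address's filter bit, and the table is split across the
   MulticastFilter0/1 registers. */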
static u32 __set_rx_mode(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
        u32 mc_filter[2];
        u32 rx_mode;

        if (dev->flags & IFF_PROMISC) {         /* set promiscuous */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
                        | AcceptMyPhys;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many addresses to match, or accept all multicasts. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        } else {
                struct netdev_hw_addr *ha;

                memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        int filbit;

                        filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
                        filbit &= 0x3f;
                        mc_filter[filbit >> 5] |= 1 << (filbit & 31);
                }
                rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        }
        iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
        iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
        return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        u32 rx_mode = __set_rx_mode(dev);

        spin_lock_irq(&np->lock);
        update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
        spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct netdev_private *np = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int rc;

        spin_lock_irq(&np->lock);
        rc = mii_ethtool_gset(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);

        return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int rc;

        spin_lock_irq(&np->lock);
        rc = mii_ethtool_sset(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);

        return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
        return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
        debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo    = netdev_get_drvinfo,
        .get_settings   = netdev_get_settings,
        .set_settings   = netdev_set_settings,
        .nway_reset     = netdev_nway_reset,
        .get_link       = netdev_get_link,
        .get_msglevel   = netdev_get_msglevel,
        .set_msglevel   = netdev_set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct mii_ioctl_data *data = if_mii(rq);
        struct netdev_private *np = netdev_priv(dev);

        switch(cmd) {
        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
                data->phy_id = np->phys[0] & 0x1f;
                /* Fall through */

        case SIOCGMIIREG:               /* Read MII PHY register. */
                spin_lock_irq(&np->lock);
                data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
                spin_unlock_irq(&np->lock);
                return 0;

        case SIOCSMIIREG:               /* Write MII PHY register. */
                spin_lock_irq(&np->lock);
                mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
                spin_unlock_irq(&np->lock);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int netdev_close(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;

        netif_stop_queue(dev);

        if (debug > 1) {
                netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
                           ioread32(ioaddr + IntrStatus),
                           ioread32(ioaddr + NetworkConfig));
                netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
                           np->cur_tx, np->dirty_tx,
                           np->cur_rx, np->dirty_rx);
        }

        /* Stop the chip's Tx and Rx processes. */
        spin_lock_irq(&np->lock);
        netif_device_detach(dev);
        update_csr6(dev, 0);
        /* Disable interrupts by clearing the interrupt mask. */
        iowrite32(0x0000, ioaddr + IntrEnable);
        spin_unlock_irq(&np->lock);

        free_irq(np->pci_dev->irq, dev);
        wmb();
        netif_device_attach(dev);

        if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
        if (debug > 2) {
                int i;

                printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
                               i, np->tx_ring[i].length,
                               np->tx_ring[i].status, np->tx_ring[i].buffer1);
                printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++) {
                        printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
                               i, np->rx_ring[i].length,
                               np->rx_ring[i].status, np->rx_ring[i].buffer1);
                }
        }
#endif /* __i386__ debugging only */

        del_timer_sync(&np->timer);

        free_rxtx_rings(np);
        free_ringdesc(np);

        return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct netdev_private *np = netdev_priv(dev);
                unregister_netdev(dev);
                pci_release_regions(pdev);
                pci_iounmap(pdev, np->base_addr);
                free_netdev(dev);
        }
}

#ifdef CONFIG_PM
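
/*
 * Suspend/resume synchronization, in brief: the device is detached with
 * csr6 and IntrEnable cleared under np->lock, and every path that
 * touches the hardware checks netif_device_present() first, so a
 * suspended chip is never accessed.
 */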
static int w840_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;

        rtnl_lock();
        if (netif_running(dev)) {
                del_timer_sync(&np->timer);

                spin_lock_irq(&np->lock);
                netif_device_detach(dev);
                update_csr6(dev, 0);
                iowrite32(0, ioaddr + IntrEnable);
                spin_unlock_irq(&np->lock);

                synchronize_irq(np->pci_dev->irq);
                netif_tx_disable(dev);

                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

                /* no more hardware accesses behind this line. */

                BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

                free_rxtx_rings(np);
        } else {
                netif_device_detach(dev);
        }
        rtnl_unlock();
        return 0;
}

static int w840_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct netdev_private *np = netdev_priv(dev);
        int retval = 0;

        rtnl_lock();
        if (netif_device_present(dev))
                goto out;       /* device not suspended */
        if (netif_running(dev)) {
                retval = pci_enable_device(pdev);
                if (retval) {
                        dev_err(&dev->dev,
                                "pci_enable_device failed in resume\n");
                        goto out;
                }
                spin_lock_irq(&np->lock);
                iowrite32(1, np->base_addr+PCIBusCfg);
                ioread32(np->base_addr+PCIBusCfg);
                udelay(1);
                netif_device_attach(dev);
                init_rxtx_rings(dev);
                init_registers(dev);
                spin_unlock_irq(&np->lock);

                netif_wake_queue(dev);

                mod_timer(&np->timer, jiffies + 1*HZ);
        } else {
                netif_device_attach(dev);
        }
out:
        rtnl_unlock();
        return retval;
}
#endif

static struct pci_driver w840_driver = {
        .name           = DRV_NAME,
        .id_table       = w840_pci_tbl,
        .probe          = w840_probe1,
        .remove         = w840_remove1,
#ifdef CONFIG_PM
        .suspend        = w840_suspend,
        .resume         = w840_resume,
#endif
};

static int __init w840_init(void)
{
        printk(version);
        return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
        pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);