/*
 * starfire.c: Linux device driver for the Adaptec Starfire network adapter.
 *
 * Originally written 2000 by Donald Becker <becker@scyld.com>.
 * Released under the GNU General Public License (GPL); see the MODULE_*
 * declarations below.
 */
#define DRV_NAME "starfire"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "July 6, 2008"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>

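/*
 * The chip firmware is known to mishandle Tx frames whose length is not a
 * multiple of four when hardware checksumming is in use; start_tx() pads
 * such frames using PADDING_MASK below.
 */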
#define HAS_BROKEN_FIRMWARE

#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif

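/*
 * ZEROCOPY enables scatter-gather transmit directly from skb fragments;
 * together with enable_hw_cksum it turns on NETIF_F_IP_CSUM | NETIF_F_SG.
 */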
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

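/* Tunables; all but multicast_filter_limit are exposed as module parameters below. */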
static int intr_latency;
static int small_frames;

static int debug = 1;
static int max_interrupt_work = 20;
static int mtu;

static const int multicast_filter_limit = 512;

static int enable_hw_cksum = 1;

#define PKT_BUF_SZ 1536

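/*
 * Received frames shorter than rx_copybreak are copied into a freshly
 * allocated skb so the original Rx buffer can be reused.  On ia64, alpha
 * and sparc the default copies every frame; elsewhere the default is 0.
 */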
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak;
#endif

#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
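/* PCI DMA burst size, in bytes (a smaller burst is used on sparc). */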
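/* Deprecated per-card media overrides; see the MODULE_PARM_DESC entries
   for 'options' and 'full_duplex' below. */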
#define MAX_UNITS 8
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

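/*
 * Ring sizes.  The hardware Rx descriptor queue holds either 256 or 2048
 * entries, selected through RX_Q_ENTRIES.  DONE_Q_SIZE is shared by the Rx
 * and Tx completion queues, and QUEUE_ALIGN is the alignment used when the
 * queue memory block is carved up in netdev_open().
 */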
#define RX_RING_SIZE 256
#define TX_RING_SIZE 32

#define DONE_Q_SIZE 1024

#define QUEUE_ALIGN 256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

#define TX_TIMEOUT (2 * HZ)

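/*
 * With a 64-bit dma_addr_t the chip is programmed with 64-bit queue and
 * descriptor addresses; otherwise the 32-bit register flags are used and
 * descriptors carry __le32 addresses.
 */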
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define ADDR_64BITS
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

#define FIRMWARE_RX "adaptec/starfire_rx.bin"
#define FIRMWARE_TX "adaptec/starfire_tx.bin"

static const char version[] __devinitconst =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

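/* Chip capabilities and the PCI ID table. */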
enum chip_capability_flags { CanHaveMII = 1, };

enum chipset {
	CH_6915 = 0,
};

static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
	{ PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

static const struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] __devinitdata = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};

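/* Offsets to the device registers in the memory-mapped I/O window. */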
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094,
	TxRingHiAddr=0x5009C,
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

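/* Bits in the interrupt status/mask registers. */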
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,

	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

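/* Bits in the RxFilterMode register. */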
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
	WakeupOnGFP=0x0800,
};

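/* Bits in the TxMode register. */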
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

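/* Bits in the TxDescCtrl register. */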
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};

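/* Bits in the RxDescQCtrl register. */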
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
	RxConsumerWrEn=0x80,
};

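/* Bits in the RxDMACtrl register. */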
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};

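/* Bits in the RxCompletionAddr register. */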
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};

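/* Bits in the TxCompletionAddr register. */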
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};

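/* Bits in the GenCtrl register. */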
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};

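/* Bits in the IntrTimerCtrl register. */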
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};

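/* The Rx buffer descriptor: one bus address per ring slot, with validity
   and end-of-ring flags in the low bits. */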
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};

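/* Rx completion queue entry formats; the chip can be told to report
   completions in several layouts, with or without VLAN and checksum info. */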
struct short_rx_done_desc {
	__le32 status;
};
struct basic_rx_done_desc {
	__le32 status;
	__le16 vlanid;
	__le16 status2;
};
struct csum_rx_done_desc {
	__le32 status;
	__le16 csum;
	__le16 status2;
};
struct full_rx_done_desc {
	__le32 status;
	__le16 status3;
	__le16 status2;
	__le16 vlanid;
	__le16 csum;
	__le32 timestamp;
};

#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif

enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

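/* Tx descriptors: type 1 carries a 32-bit buffer address, type 2 a 64-bit
   one; the type in use is chosen to match ADDR_64BITS. */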
struct starfire_tx_desc_1 {
	__le32 status;
	__le32 addr;
};

struct starfire_tx_desc_2 {
	__le32 status;
	__le32 reserved;
	__le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	__le32 status;
#if 0
	__le32 intrstatus;
#endif
};

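/* Per-slot bookkeeping for the live skbs and DMA mappings in each ring. */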
struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};

#define PHY_CNT 2
struct netdev_private {
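	/* Descriptor rings and their bus addresses. */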
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];

	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct napi_struct napi;
	struct net_device *dev;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	struct vlan_group *vlgrp;
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;

	int speed100;
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;

	struct mii_if_info mii_if;
	int phy_cnt;
	unsigned char phys[PHY_CNT];
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static int netdev_poll(struct napi_struct *napi, int budget);
static void refill_rx_ring(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

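/* VLAN hooks: the chip can filter VLAN IDs in hardware and reports each
   frame's tag in its Rx completion descriptor. */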
#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 2)
		printk("%s: Setting vlgrp to %p\n", dev->name, grp);
	np->vlgrp = grp;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	vlan_group_set_device(np->vlgrp, vid, NULL);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list	= &set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef VLAN_SUPPORT
	.ndo_vlan_rx_register	= netdev_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
#endif
};

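/* Probe one board: map its registers, reset the chip, read the station
   address from the EEPROM and register the net_device. */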
static int __devinit starfire_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, irq, option, chip_idx = ent->driver_data;
	struct net_device *dev;
	static int card_idx = -1;
	long ioaddr;
	void __iomem *base;
	int drv_flags, io_size;
	int boguscnt;

#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
		goto err_out_free_netdev;
	}

	base = ioremap(ioaddr, io_size);
	if (!base) {
		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
		       card_idx, io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif

#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
#endif
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version)
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

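	/* Reset the MII, then reset the whole chip: PCIDeviceConfig bit 0
	   self-clears once the reset has completed. */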
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);

	udelay(1000);

	dev->base_addr = (unsigned long)base;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	if (option & 0x200)
		np->mii_if.full_duplex = 1;

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;
	else
		np->mii_if.force_media = 0;
	np->speed100 = 1;

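	/* Convert the requested interrupt latency (in microseconds) into the
	   chip's timer units of roughly 102.4 us each. */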
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;

	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);

	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

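	/* Scan the MII bus, resetting each PHY found; remember up to PHY_CNT
	   of them for later use. */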
	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			mdelay(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

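/* Read an MII register through the chip's memory-mapped MII interface,
   spinning until the read completes. */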
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id << 7) + (location << 2);
	int result, boguscnt = 1000;

	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id << 7) + (location << 2);
	writel(value, mdio_addr);
}

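/* Bring the interface up: allocate the queue memory, program the descriptor
   queues, load the Rx/Tx GFP firmware and enable the chip. */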
static int netdev_open(struct net_device *dev)
{
	const struct firmware *fw_rx, *fw_tx;
	const __be32 *fw_rx_data, *fw_tx_data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i, retval;
	size_t tx_size, rx_size;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, dev->irq);

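	/* Allocate one DMA-coherent block and carve it into the Tx/Rx
	   completion queues and the Tx/Rx descriptor rings, each piece
	   aligned to QUEUE_ALIGN. */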
	if (!np->queue_mem) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
		if (np->queue_mem == NULL) {
			free_irq(dev->irq, dev);
			return -ENOMEM;
		}

		np->tx_done_q = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
	}

	netif_carrier_off(dev);
	init_ring(dev);

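	/* Set the size of each Rx buffer and the layout and addressing mode
	   of the Rx descriptor queue. */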
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);

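	/* Clear the first perfect-filter slot, then point the remaining 15
	   at our own station address so no unused slot matches stray frames. */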
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		__be16 *eaddrs = (__be16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
	}

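	/* Enable flow control and Tx padding; toggling MiiSoftReset appears
	   to be what latches the new Tx mode into the chip. */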
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);

	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif

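	/* Load the Rx and Tx GFP (frame processor) firmware; each image must
	   be a whole number of 32-bit words. */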
	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_RX);
		goto out_init;
	}
	if (fw_rx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
		goto out_rx;
	}
	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_TX);
		goto out_rx;
	}
	if (fw_tx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
		goto out_tx;
	}
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;

	for (i = 0; i < rx_size; i++)
		writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < tx_size; i++)
		writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
	if (enable_hw_cksum)
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

out_tx:
	release_firmware(fw_tx);
out_rx:
	release_firmware(fw_rx);
out_init:
	if (retval)
		netdev_close(dev);
	return retval;
}

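/* Force or renegotiate the link according to the np->mii_if settings. */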
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 reg0;
	int silly_count = 1000;

	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		;
	if (!silly_count) {
		printk("%s: MII reset failed!\n", dev->name);
		return;
	}

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}

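/* Tx watchdog: the simplest reliable recovery is to close and reopen the
   interface, which reinitializes the chip. */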
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int old_debug;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	old_debug = debug;
	debug = 2;
	netdev_close(dev);
	netdev_open(dev);
	debug = old_debug;

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

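/* Initialize the Rx and Tx rings, filling the Rx ring with fresh buffers. */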
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb->dev = dev;
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, np->base + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for ( ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}

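/* Queue one skb for transmission.  Each fragment gets its own descriptor;
   the first descriptor of a frame also carries the CRC/interrupt/ring-wrap
   flags and the fragment count. */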
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	u32 status;
	int i;

	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
			return NETDEV_TX_OK;
	}
#endif

	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
		status = TxDescID;

		if (i == 0) {
			np->tx_info[entry].skb = skb;
			status |= TxCRCEn;
			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
				status |= TxRingWrap;
				wrap_ring = 1;
			}
			if (np->reap_tx) {
				status |= TxDescIntr;
				np->reap_tx = 0;
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				status |= TxCalTCP;
				dev->stats.tx_compressed++;
			}
			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		} else {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
			status |= this_frag->size;
			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
		}

		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
			       dev->name, np->cur_tx, np->dirty_tx,
			       entry, status);
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}

		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
			np->reap_tx = 1;
	}

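	/* Make the descriptor writes globally visible before telling the
	   chip about the new producer index. */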
	wmb();

	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

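/* The interrupt handler: acknowledge events, hand Rx work to NAPI and reap
   completed Tx descriptors. */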
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
			u32 enable;

			if (likely(napi_schedule_prep(&np->napi))) {
				__napi_schedule(&np->napi);
				enable = readl(ioaddr + IntrEnable);
				enable &= ~(IntrRxDone | IntrRxEmpty);
				writel(enable, ioaddr + IntrEnable);
				readl(ioaddr + IntrEnable);
			} else {
				enable = readl(ioaddr + IntrEnable);
				if (enable & (IntrRxDone | IntrRxEmpty)) {
					printk(KERN_INFO
					       "%s: interrupt while in poll!\n",
					       dev->name);
					enable &= ~(IntrRxDone | IntrRxEmpty);
					writel(enable, ioaddr + IntrEnable);
				}
			}
		}

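		/* Scan the Tx completion queue and free skbs for frames the
		   chip has finished transmitting. */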
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				dev->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				pci_unmap_single(np->pci_dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 PCI_DMA_TODEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						pci_unmap_single(np->pci_dev,
								 np->tx_info[entry].mapping,
								 skb_shinfo(skb)->frags[i].size,
								 PCI_DMA_TODEVICE);
						np->dirty_tx++;
						entry++;
					}
				}

				dev_kfree_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			netif_wake_queue(dev);
		}

		if (intr_status & IntrStatsMax)
			get_stats(dev);

		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

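/* NAPI Rx worker: process up to *quota completed receive frames.  Returns
   nonzero if the quota ran out and polling should continue. */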
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			dev->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				dev->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {
			retcode = 1;
			goto out;
		}
		(*quota)--;

		pkt_len = desc_status;
		entry = (desc_status >> 16) & 0x7ff;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);

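		/* Copy small frames into a fresh skb so the large Rx buffer
		   can be recycled; hand larger frames up directly. */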
		if (pkt_len < rx_copybreak &&
		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);
			pci_dma_sync_single_for_cpu(np->pci_dev,
						    np->rx_info[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			pci_dma_sync_single_for_device(np->pci_dev,
						       np->rx_info[entry].mapping,
						       pkt_len, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version
		if (debug > 5) {
			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
			       skb->data, skb->data + 6,
			       skb->data[12], skb->data[13]);
		}
#endif

		skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
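		/* If the chip matched a VLAN, pass the frame to the VLAN
		   acceleration path with the tag from the completion entry. */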
#ifdef VLAN_SUPPORT
		if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

			if (debug > 4) {
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
				       vlid);
			}
			vlan_hwaccel_rx(skb, np->vlgrp, vlid);
		} else
#endif
			netif_receive_skb(skb);
		dev->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}

	if (*quota == 0) {
		retcode = 1;
		goto out;
	}
	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}

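/* NAPI poll: run Rx processing until the quota runs out or the chip goes
   idle, then re-enable Rx interrupts. */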
static int netdev_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	u32 intr_status;
	void __iomem *ioaddr = np->base;
	int quota = budget;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		if (__netdev_rx(dev, &quota))
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	napi_complete(napi);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
		       budget - quota);

	return budget - quota;
}

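/* Refill empty Rx ring slots with fresh buffers and advance the chip's Rx
   descriptor queue index. */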
static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb->dev = dev;
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}

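/* Handle a link change: resolve speed/duplex from the PHY and reprogram
   the Tx mode and interrupt timer to match. */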
static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

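	/* BMSR bits are latched; read the registers twice so the second read
	   reflects the current link state. */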
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		if (reg0 & BMCR_ANENABLE) {
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}

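/* Handle abnormal interrupts: grow the Tx FIFO threshold on underrun and
   account FIFO/DMA errors. */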
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}

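/* Pull the chip's hardware statistics counters into dev->stats. */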
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}

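/* Program the Rx filters: the VLAN filter table first, then promiscuous,
   all-multicast, a 15-entry perfect filter or the multicast hash table,
   depending on the configured addresses. */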
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;
#ifdef VLAN_SUPPORT
	rx_mode |= VlanMode;
	if (np->vlgrp) {
		int vlan_count = 0;
		void __iomem *filter_addr = ioaddr + HashTable + 8;
		for (i = 0; i < VLAN_VID_MASK; i++) {
			if (vlan_group_get_device(np->vlgrp, i)) {
				if (vlan_count >= 32)
					break;
				writew(i, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
		if (i == VLAN_VID_MASK) {
			rx_mode |= PerfectFilterVlan;
			while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
	}
#endif

	if (dev->flags & IFF_PROMISC) {
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		__be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (__be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
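		/* Build a 512-bit multicast hash table from the top bits of
		   each address's CRC, then load it into the chip. */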
		void __iomem *filter_addr;
		__be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}

		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (__be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}

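/* Ethtool support. */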
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

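/* Take the interface down: stop the queues and DMA engines, then free
   every ring buffer. */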
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	writel(0, ioaddr + IntrEnable);

	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 5) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 ; i++)
			printk(KERN_DEBUG "  #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 ; i++) {
				printk(KERN_DEBUG "  #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(dev->irq, dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0);
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

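/* Power management: close the device across suspend and reopen it on resume. */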
#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */

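/* Unregister the device and release all chip resources. */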
static void __devexit starfire_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= __devexit_p(starfire_remove_one),
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif
	.id_table	= starfire_pci_tbl,
};

static int __init starfire_init(void)
{
#ifdef MODULE
	printk(version);

	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

	return pci_register_driver(&starfire_driver);
}

static void __exit starfire_cleanup(void)
{
	pci_unregister_driver(&starfire_driver);
}

module_init(starfire_init);
module_exit(starfire_cleanup);