/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#define DRV_NAME "natsemi"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"

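/* Bytes reserved at the start of each RX buffer so the IP header ends
 * up aligned (the 14-byte Ethernet header is not a multiple of four);
 * netdev_rx() applies it with skb_reserve() on copied frames. */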
#define RX_OFFSET 2
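
/* Default console message level: a bitmap of NETIF_MSG_* values.
 * Overridden by the "debug" module parameter when debug >= 0. */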
#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_WOL | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)
static int debug = -1;

static int mtu;

static const int multicast_filter_limit = 100;

static int rx_copybreak;

static int dspcfg_workaround = 1;

#define MAX_UNITS 8
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
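
/* Ring sizes. TX_QUEUE_LEN caps the number of in-flight TX descriptors;
 * it must stay below TX_RING_SIZE so the ring can never fill completely
 * (start_tx() stops the queue at TX_QUEUE_LEN - 1 outstanding frames). */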
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10
#define RX_RING_SIZE 32

#define TX_TIMEOUT (2*HZ)

#define NATSEMI_HW_TIMEOUT 400
#define NATSEMI_TIMER_FREQ (5*HZ)
#define NATSEMI_PG0_NREGS 64
#define NATSEMI_RFDR_NREGS 8
#define NATSEMI_PG1_NREGS 4
#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
		       NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER 1
#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
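
/* Buffer sizing: NATSEMI_HEADERS (22) covers two MAC addresses, the
 * type field, a VLAN tag and the CRC; NATSEMI_LONGPKT is the standard
 * maximum frame and NATSEMI_RX_LIMIT the largest RX size the chip
 * accepts. */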
#define NATSEMI_HEADERS 22
#define NATSEMI_PADDING 16
#define NATSEMI_LONGPKT 1518
#define NATSEMI_RX_LIMIT 2046

static const char version[] =
	KERN_INFO DRV_NAME " dp8381x driver, version "
	DRV_VERSION ", " DRV_RELDATE "\n"
	"  originally by Donald Becker <becker@scyld.com>\n"
	"  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
MODULE_LICENSE("GPL");

module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x default debug level");
MODULE_PARM_DESC(rx_copybreak,
	"DP8381x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
MODULE_PARM_DESC(options,
	"DP8381x: Bits 0-3: media type, bit 9 (0x200): full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
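
/* PHY ID of the AMD AM79C874 10/100 PHY; init_phy_fixup() gives it
 * special fiber-select/scrambler handling via its mode control
 * register (MII_MCTRL). */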
#define PHYID_AM79C874 0x0022561b

enum {
	MII_MCTRL = 0x15,
	MII_FX_SEL = 0x0001,
	MII_EN_SCRM = 0x0004,
};

enum {
	NATSEMI_FLAG_IGNORE_PHY = 0x1,
};

static struct {
	const char *name;
	unsigned long flags;
	unsigned int eeprom_size;
} natsemi_pci_info[] = {
	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
	{ "NatSemi DP8381[56]", 0, 24 },
};

static const struct pci_device_id natsemi_pci_tbl[] = {
	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
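
/* Offsets of the device registers within the memory-mapped I/O window.
 * The block starting at BasicControl (0x80) mirrors the internal PHY's
 * MII registers; mdio_read()/mdio_write() use it for PORT_TP. */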
enum register_offsets {
	ChipCmd = 0x00,
	ChipConfig = 0x04,
	EECtrl = 0x08,
	PCIBusCfg = 0x0C,
	IntrStatus = 0x10,
	IntrMask = 0x14,
	IntrEnable = 0x18,
	IntrHoldoff = 0x1C,
	TxRingPtr = 0x20,
	TxConfig = 0x24,
	RxRingPtr = 0x30,
	RxConfig = 0x34,
	ClkRun = 0x3C,
	WOLCmd = 0x40,
	PauseCmd = 0x44,
	RxFilterAddr = 0x48,
	RxFilterData = 0x4C,
	BootRomAddr = 0x50,
	BootRomData = 0x54,
	SiliconRev = 0x58,
	StatsCtrl = 0x5C,
	StatsData = 0x60,
	RxPktErrs = 0x60,
	RxMissed = 0x68,
	RxCRCErrs = 0x64,
	BasicControl = 0x80,
	BasicStatus = 0x84,
	AnegAdv = 0x90,
	AnegPeer = 0x94,
	PhyStatus = 0xC0,
	MIntrCtrl = 0xC4,
	MIntrStatus = 0xC8,
	PhyCtrl = 0xE4,

	PGSEL = 0xCC,
	PMDCSR = 0xE4,
	TSTDAT = 0xFC,
	DSPCFG = 0xF4,
	SDCFG = 0xF8
};

#define PMDCSR_VAL 0x189c
#define TSTDAT_VAL 0x0
#define DSPCFG_VAL 0x5040
#define SDCFG_VAL 0x008c
#define DSPCFG_LOCK 0x20
#define DSPCFG_COEF 0x1000
#define TSTDAT_FIXED 0xe8

enum pci_register_offsets {
	PCIPM = 0x44,
};

enum ChipCmd_bits {
	ChipReset = 0x100,
	RxReset = 0x20,
	TxReset = 0x10,
	RxOff = 0x08,
	RxOn = 0x04,
	TxOff = 0x02,
	TxOn = 0x01,
};

enum ChipConfig_bits {
	CfgPhyDis = 0x200,
	CfgPhyRst = 0x400,
	CfgExtPhy = 0x1000,
	CfgAnegEnable = 0x2000,
	CfgAneg100 = 0x4000,
	CfgAnegFull = 0x8000,
	CfgAnegDone = 0x8000000,
	CfgFullDuplex = 0x20000000,
	CfgSpeed100 = 0x40000000,
	CfgLink = 0x80000000,
};

enum EECtrl_bits {
	EE_ShiftClk = 0x04,
	EE_DataIn = 0x01,
	EE_ChipSelect = 0x08,
	EE_DataOut = 0x02,
	MII_Data = 0x10,
	MII_Write = 0x20,
	MII_ShiftClk = 0x40,
};

enum PCIBusCfg_bits {
	EepromReload = 0x4,
};

enum IntrStatus_bits {
	IntrRxDone = 0x0001,
	IntrRxIntr = 0x0002,
	IntrRxErr = 0x0004,
	IntrRxEarly = 0x0008,
	IntrRxIdle = 0x0010,
	IntrRxOverrun = 0x0020,
	IntrTxDone = 0x0040,
	IntrTxIntr = 0x0080,
	IntrTxErr = 0x0100,
	IntrTxIdle = 0x0200,
	IntrTxUnderrun = 0x0400,
	StatsMax = 0x0800,
	SWInt = 0x1000,
	WOLPkt = 0x2000,
	LinkChange = 0x4000,
	IntrHighBits = 0x8000,
	RxStatusFIFOOver = 0x10000,
	IntrPCIErr = 0xf00000,
	RxResetDone = 0x1000000,
	TxResetDone = 0x2000000,
	IntrAbnormalSummary = 0xCD20,
};
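
/* Default set of interrupts enabled in IntrMask: RX/TX done and error
 * events, TX underrun, statistics overflow, link change, RX status
 * FIFO overrun and PCI errors. */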
#define DEFAULT_INTR 0x00f1cd65

enum TxConfig_bits {
	TxDrthMask = 0x3f,
	TxFlthMask = 0x3f00,
	TxMxdmaMask = 0x700000,
	TxMxdma_512 = 0x0,
	TxMxdma_4 = 0x100000,
	TxMxdma_8 = 0x200000,
	TxMxdma_16 = 0x300000,
	TxMxdma_32 = 0x400000,
	TxMxdma_64 = 0x500000,
	TxMxdma_128 = 0x600000,
	TxMxdma_256 = 0x700000,
	TxCollRetry = 0x800000,
	TxAutoPad = 0x10000000,
	TxMacLoop = 0x20000000,
	TxHeartIgn = 0x40000000,
	TxCarrierIgn = 0x80000000
};
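
/* TX FIFO thresholds, in units of 32 bytes: fill threshold 512 bytes,
 * drain threshold starting at 64 bytes. netdev_error() raises the drain
 * threshold by TX_DRTH_VAL_INC on every TX underrun, up to
 * TX_DRTH_VAL_LIMIT. */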
#define TX_FLTH_VAL ((512/32) << 8)
#define TX_DRTH_VAL_START (64/32)
#define TX_DRTH_VAL_INC 2
#define TX_DRTH_VAL_LIMIT (1472/32)

enum RxConfig_bits {
	RxDrthMask = 0x3e,
	RxMxdmaMask = 0x700000,
	RxMxdma_512 = 0x0,
	RxMxdma_4 = 0x100000,
	RxMxdma_8 = 0x200000,
	RxMxdma_16 = 0x300000,
	RxMxdma_32 = 0x400000,
	RxMxdma_64 = 0x500000,
	RxMxdma_128 = 0x600000,
	RxMxdma_256 = 0x700000,
	RxAcceptLong = 0x8000000,
	RxAcceptTx = 0x10000000,
	RxAcceptRunt = 0x40000000,
	RxAcceptErr = 0x80000000
};
#define RX_DRTH_VAL (128/8)

enum ClkRun_bits {
	PMEEnable = 0x100,
	PMEStatus = 0x8000,
};

enum WolCmd_bits {
	WakePhy = 0x1,
	WakeUnicast = 0x2,
	WakeMulticast = 0x4,
	WakeBroadcast = 0x8,
	WakeArp = 0x10,
	WakePMatch0 = 0x20,
	WakePMatch1 = 0x40,
	WakePMatch2 = 0x80,
	WakePMatch3 = 0x100,
	WakeMagic = 0x200,
	WakeMagicSecure = 0x400,
	SecureHack = 0x100000,
	WokePhy = 0x400000,
	WokeUnicast = 0x800000,
	WokeMulticast = 0x1000000,
	WokeBroadcast = 0x2000000,
	WokeArp = 0x4000000,
	WokePMatch0 = 0x8000000,
	WokePMatch1 = 0x10000000,
	WokePMatch2 = 0x20000000,
	WokePMatch3 = 0x40000000,
	WokeMagic = 0x80000000,
	WakeOptsSummary = 0x7ff
};

enum RxFilterAddr_bits {
	RFCRAddressMask = 0x3ff,
	AcceptMulticast = 0x00200000,
	AcceptMyPhys = 0x08000000,
	AcceptAllPhys = 0x10000000,
	AcceptAllMulticast = 0x20000000,
	AcceptBroadcast = 0x40000000,
	RxFilterEnable = 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn = 0x1,
	StatsFreeze = 0x2,
	StatsClear = 0x4,
	StatsStrobe = 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn = 0x2,
};

enum PhyCtrl_bits {
	PhyAddrMask = 0x1f,
};

#define PHY_ADDR_NONE 32
#define PHY_ADDR_INTERNAL 1

#define SRR_DP83815_C 0x0302
#define SRR_DP83815_D 0x0403
#define SRR_DP83816_A4 0x0504
#define SRR_DP83816_A5 0x0505
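
/* The DP8381x uses one descriptor layout for both RX and TX: a link to
 * the next descriptor, a command/status word, the buffer address, and
 * one word left free for software. All fields are little-endian u32. */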
struct netdev_desc {
	__le32 next_desc;
	__le32 cmd_status;
	__le32 addr;
	__le32 software_use;
};

enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};
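
/* Per-interface driver state. np->lock protects the fields shared
 * between process context, the timer and the interrupt/NAPI paths
 * (ring indices, the rx/tx config shadows, hands_off). */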
struct netdev_private {
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;

	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];

	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;
	struct napi_struct napi;

	struct timer_list timer;

	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;

	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;

	unsigned int rx_buf_sz;
	int oom;

	u32 intr_status;

	int hands_off;

	int ignore_phy;

	int mii;
	int phy_addr_external;
	unsigned int full_duplex;

	u32 cur_rx_mode;
	u32 rx_filter[16];

	u32 tx_config, rx_config;

	u32 SavedClkRun;

	u32 srr;

	u16 dspcfg;
	int dspcfg_workaround;

	u16 speed;
	u8 duplex;
	u8 autoneg;

	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;

	int eeprom_size;
};

static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct napi_struct *napi, int budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
static const struct ethtool_ops ethtool_ops;

#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
		struct device_attribute *attr, char *buf); \
static ssize_t natsemi_set_##_name(struct device *dev, \
		struct device_attribute *attr, \
		const char *buf, size_t count); \
static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	device_remove_file(&_dev->dev, &dev_attr_##_name)

NATSEMI_ATTR(dspcfg_workaround);

static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
}

static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1) ||
		 !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}

static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;
}
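
/* The readl() after each writel() below flushes the posted PCI write so
 * the interrupt state change takes effect before these helpers return. */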
static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;
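
	/* The internal PHY also answers on the external MII bus, so before
	 * talking to an external PHY it must be parked on an address that
	 * collides neither with the address being probed nor with the
	 * external PHY itself. */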
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);
	udelay(1);
}

static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	tmp = mdio_read(dev, MII_BMCR);
	np->speed = (tmp & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
	np->duplex = (tmp & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	np->autoneg = (tmp & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	np->advertising = mdio_read(dev, MII_ADVERTISE);

	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE) ?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF)) ?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL)) ?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);
}

static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= natsemi_change_mtu,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= ns_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= natsemi_poll_controller,
#endif
};

static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1;
	int prev_eedata;
	u32 tmp;

#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;
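
	/* The natsemi has a non-standard power-management control register
	 * in PCI config space (PCIPM); some boards apparently come up in a
	 * low-power state, so force the chip into D0 before touching it. */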
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_ioremap;
	}
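
	/* The station address sits in EEPROM words 6 through 9; the 48
	 * address bits are packed with a one-bit offset across the word
	 * boundaries, hence the shifting below. */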
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		dev->dev_addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;
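
	/* Choose between the internal transceiver (PORT_TP) and an external
	 * MII PHY: boards flagged IGNORE_PHY, or strapped with CfgExtPhy,
	 * use the external MII port. */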
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;

	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);

		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;

	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d\n",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
			pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
			"(%s), %pM, IRQ %d",
			dev->name, natsemi_pci_info[chip_idx].name,
			(unsigned long long)iostart, pci_name(np->pci_dev),
			dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_ioremap:
	pci_release_regions(pdev);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}
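
/* EEPROM access. The DP8381x talks to a small serial EEPROM through
 * bit-banged lines in the EECtrl register; a dummy readl() of the
 * register serves as the inter-bit delay. */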
#define eeprom_delay(ee_addr) readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
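
/* MII transceiver control. External PHYs are reached with a software
 * bit-banged MDIO serial protocol, also driven through EECtrl. */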
#define mii_delay(ioaddr) readl(ioaddr + EECtrl)

static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data) ? 1 : 0;
}

static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i) ? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}

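/* MDIO frame format: a preamble of 32 ones establishes sync; a read is
 * a 14-bit command (start, opcode 6, PHY address, register) followed by
 * a turnaround bit and 16 data bits from the PHY; a write is one 32-bit
 * frame that includes the data. */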
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	mii_send_bits (dev, 0xffffffff, 32);

	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);

	if (mii_getbit (dev))
		return 0;

	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}

	mii_getbit (dev);
	return retval;
}

static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	mii_send_bits (dev, 0xffffffff, 32);

	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);

	mii_getbit (dev);
}

static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
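
	/* The internal transceiver's MII registers are memory-mapped
	 * starting at BasicControl; external PHYs go through the
	 * bit-banged MDIO port. */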
	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}

static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
		miiport_write(dev, np->phy_addr_external, reg, data);
}

static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);
	udelay(1);

	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
		+ mdio_read(dev, MII_PHYSID2);

	switch (np->mii) {
	case PHYID_AM79C874:
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;
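
	/* Undocumented DSPCFG workaround for the internal PHY: program the
	 * page-1 analog registers and re-read DSPCFG until the written
	 * value sticks; on some chips the first writes are silently lost. */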
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		int dspcfg;

		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C) ?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i == NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}

	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}

static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
			dev->name);
	}

	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);
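
	/* Park the internal PHY on a free MII address so it cannot answer
	 * for the external transceiver, then program the external PHY. */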
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}

static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg & CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
			dev->name);
	}

	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i == NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}

	init_phy_fixup(dev);

	return 1;
}
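
/* Scan the external MII bus for a PHY. Addresses 1..31 are probed in
 * order (address 0 is commonly treated as broadcast); the internal PHY
 * is parked out of the way by move_int_phy() before each probe. Returns
 * the address found, or PHY_ADDR_NONE (32) if the scan falls through. */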
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	did_switch = switch_port_external(dev);

	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
					pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}

	if (did_switch)
		switch_port_internal(dev);
	return i;
}
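
/* Bits of ChipConfig, WOLCmd and RxFilterAddr that natsemi_reset()
 * preserves across a chip reset. */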
#define CFG_RESET_SAVE 0xfde000
#define WCSR_RESET_SAVE 0x61f
#define RFCR_RESET_SAVE 0xf8500000

static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
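
	/* Save the state the reset clobbers: chip config, WOL command,
	 * RX filter control, and the pattern-match and SecureOn password
	 * words in the filter RAM; everything is restored once the reset
	 * completes. */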
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;

	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;

	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;

	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}

	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	writel(ChipReset, ioaddr + ChipCmd);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;

	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);

	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);

	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;

	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}

	writel(rfcr, ioaddr + RxFilterAddr);
}

static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
			dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
			dev->name, i*15);
	}
}

static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}

static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem *ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);

	for (i = 0; i < 3; i++) {
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	init_timer(&np->timer);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	np->timer.data = (unsigned long)dev;
	np->timer.function = netdev_timer;
	add_timer(&np->timer);

	return 0;
}

static void do_cable_magic(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;
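
	/* 100 MBit short-cable workaround for older silicon (up to
	 * DP83816 A4): if the adapted DSP coefficient read back below is
	 * out of the sane range, freeze a known-good value with
	 * DSPCFG_LOCK; undo_cable_magic() releases it on link loss. */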
	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
		u16 data;

		writew(1, ioaddr + PGSEL);

		data = readw(ioaddr + TSTDAT) & 0xff;

		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
			np = netdev_priv(dev);

			writew(TSTDAT_FIXED, ioaddr + TSTDAT);

			data = readw(ioaddr + DSPCFG);
			np->dspcfg = data | DSPCFG_LOCK;
			writew(np->dspcfg, ioaddr + DSPCFG);
		}
		writew(0, ioaddr + PGSEL);
	}
}

static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);

	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}

static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	if (np->ignore_phy)
		goto propagate_state;
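
	/* The BMSR link status bit is latched-low, so read it twice: the
	 * first read clears any stale "link lost" indication. */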
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);
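
	/* TX setup: auto-pad runt frames, retry on collision, 256-byte DMA
	 * bursts, 512-byte fill threshold, and the initial 64-byte drain
	 * threshold (raised later on underruns by netdev_error()). */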
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
			TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;

	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);
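
	/* PME is disabled while the interface is up; the original ClkRun
	 * value is saved, presumably so it can be restored when the
	 * interface is brought back down. */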
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl);
}
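
/* The media timer runs every NATSEMI_TIMER_FREQ jiffies. Besides
 * checking the link it detects a spontaneously reset internal PHY
 * (DSPCFG no longer matching what was written) and recovers from RX
 * buffer allocation failures flagged in np->oom. */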
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);

		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}

	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}

static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}

static void ns_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev);
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

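/* Both rings live in one coherent DMA allocation: RX descriptors first,
 * TX descriptors right behind them (see the TxRingPtr setup in
 * init_registers()). */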
static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		&np->ring_dma);
	if (!np->rx_ring)
		return -ENOMEM;
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
	return 0;
}

static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz + NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}

static void set_bufsize(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
}

static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}

static void drain_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->tx_dma[i], np->tx_skbuff[i]->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			dev->stats.tx_dropped++;
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void drain_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
				buflen + NATSEMI_PADDING,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
}

static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}

static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	pci_free_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}

static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}

static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;
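
	/* Map and fill the descriptor before taking the lock; only the
	 * ownership hand-off and the queue accounting need np->lock, as
	 * the NAPI path also updates cur_tx/dirty_tx. */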
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
		skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);

		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}

		writel(TxOn, ioaddr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
				dev->name, np->dirty_tx,
				le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else {
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev, np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);

		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
}
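
/* The interrupt handler only acknowledges the interrupt and hands the
 * work to NAPI. On this chip reading IntrStatus appears to acknowledge
 * the asserted bits, which is presumably why the value is cached in
 * np->intr_status for the poll routine rather than re-read later. */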
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
			"%s: Interrupt, status %#08x, mask %#08x.\n",
			dev->name, np->intr_status,
			readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
			"%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
			dev->name, np->intr_status,
			readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
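
/* NAPI poll: handles RX, TX completion and abnormal events, re-reading
 * IntrStatus until nothing is pending or the budget is exhausted; only
 * then is the hardware interrupt re-enabled. */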
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
				"%s: Poll, status %#08x, mask %#08x.\n",
				dev->name, np->intr_status,
				readl(ioaddr + IntrMask));

		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete(napi);

	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
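
/* RX: walk the descriptors the chip has completed (DescOwn set, i.e.
 * cmd_status negative when read as s32), pass good frames up the stack,
 * and recycle buffers via refill_rx(). Frames shorter than rx_copybreak
 * are copied into a fresh skb so the large RX buffer can be reused. */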
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem *ioaddr = ns_ioaddr(dev);

	while (desc_status < 0) {
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status & (DescMore|DescPktOK|DescRxLong)) != DescPktOK) {
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				break;

			} else {
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
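			/* Frame longer than the buffer: dropped silently;
			 * the descriptor is simply recycled below. */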
		} else {
			struct sk_buff *skb;

			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				skb_reserve(skb, RX_OFFSET);
				pci_dma_sync_single_for_cpu(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
					buflen + NATSEMI_PADDING,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}
2393
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}
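		/* read MII int status to clear the flag */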
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}

	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}

static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);

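	/* The chip only need report frame silently dropped. */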
	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

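	/* The chip only need report frame silently dropped. */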
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &dev->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	intr_handler(irq, dev);
	enable_irq(irq);
}
#endif

#define HASH_TABLE 0x200
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64];
	u32 rx_mode;

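	/* Select promiscuous, accept-all-multicast, or a 512-bit multicast
	 * hash filter, depending on the interface flags and list length. */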
	if (dev->flags & IFF_PROMISC) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
}

static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
		return -EINVAL;

	dev->mtu = new_mtu;

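	/* synchronized against open : rtnl_lock() held by caller */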
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);
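		/* stop engines */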
		natsemi_stop_rxtx(dev);
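		/* drain rx queue */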
		drain_rx(dev);
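		/* change buffers */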
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
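		/* restart engines */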
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static int nway_reset(struct net_device *dev)
{
	int tmp;
	int r = -EINVAL;
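	/* if autoneg is off, it's an error */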
	tmp = mdio_read(dev, MII_BMCR);
	if (tmp & BMCR_ANENABLE) {
		tmp |= (BMCR_ANRESTART);
		mdio_write(dev, MII_BMCR, tmp);
		r = 0;
	}
	return r;
}

static u32 get_link(struct net_device *dev)
{
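	/* LSTATUS is latched low until a read - so read twice */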
	mdio_read(dev, MII_BMSR);
	return (mdio_read(dev, MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf+eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
};

static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

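	/* translate to bitmasks this chip understands */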
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
	if (np->srr >= SRR_DP83815_D) {
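		/* SOPASS works on revD and higher */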
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}

static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
		| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
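		/* SOPASS works on revD and higher */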
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

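	/* translate from chip bitmasks */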
	if (regval & WakePhy)
		*cur |= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure) {
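		/* SecureOn password wake-up */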
		*cur |= WAKE_MAGICSECURE;
	}

	return 0;
}

static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

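	/* the rx filter must be disabled while the sopass registers
	 * are written */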
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

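	/* write the three shorts of the SecureOn password */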
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

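	/* re-enable the rx filter */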
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}

static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

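	/* read back the three shorts of the SecureOn password */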
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}

static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	ecmd->port = dev->if_port;
	ethtool_cmd_speed_set(ecmd, np->speed);
	ecmd->duplex = np->duplex;
	ecmd->autoneg = np->autoneg;
	ecmd->advertising = 0;
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
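	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * for working properly with 'ethtool'.
	 *
	 * The meaning of the port and transceiver fields, as used
	 * by the switch below:
	 * - PORT_TP is driven by the internal phy (XCVR_INTERNAL).
	 * - PORT_MII and PORT_FIBRE are driven by an external phy
	 *   (XCVR_EXTERNAL); FIBRE differs from MII only in the
	 *   link encoding used by the external phy.
	 */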
	switch (ecmd->port) {
	default:
	case PORT_TP:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		break;
	case PORT_MII:
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case PORT_FIBRE:
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	}

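	/* if autoneg is on, report the speed and duplex that were
	 * actually negotiated with the link partner */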
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ethtool_cmd_speed_set(ecmd, SPEED_100);
		else
			ethtool_cmd_speed_set(ecmd, SPEED_10);
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	}

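	/* ignore maxtxpkt, maxrxpkt for now */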
	return 0;
}

static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(ecmd);
		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

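	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work, so don't let
	 * the user select them.
	 */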
	if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
			       ecmd->port == PORT_TP))
		return -EINVAL;

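	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are both
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver
	 * and selects the transceiver based on ecmd->port alone.
	 */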
	dev->if_port = ecmd->port;
	np->autoneg = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
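		/* advertise only what has been requested */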
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed = ethtool_cmd_speed(ecmd);
		np->duplex = ecmd->duplex;
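		/* a forced half duplex setting overrides the full_duplex
		 * module option */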
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

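	/* select the transceiver: the internal phy for TP, the
	 * external one for MII and FIBRE */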
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

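	/* apply the new physical parameters and recheck the link */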
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}

static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

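	/* read non-mii page 0 of registers */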
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

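	/* read current mii registers */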
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

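	/* read only the 'magic' registers from page 1 */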
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

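	/* read RFCR indexed registers */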
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

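	/* the interrupt status is clear-on-read - see if we missed any */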
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}

#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )

static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

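	/* eeprom_read reads 16 bits, and indexes by 16 bits */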
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
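		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sane". So we swap it back here in order to
		 * present it to userland as it is stored. */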
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = np->phy_addr_external;
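		/* fall through */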
	case SIOCGMIIREG:
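		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */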
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							  data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
						     data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
					   data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
				      data->reg_num & 0x1f,
				      data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

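	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */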
	writel(0, ioaddr + RxRingPtr);

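	/* read WoL status to clear */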
	readl(ioaddr + WOLCmd);

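	/* PME on, clear status */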
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

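	/* and restart the rx process */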
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
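		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */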
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}

static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

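	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */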
	del_timer_sync(&np->timer);
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

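	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the chip are disabled.
	 */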
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

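	/* Freeze Stats */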
	writel(StatsFreeze, ioaddr + StatsCtrl);

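	/* Stop the chip's Tx and Rx processes. */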
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

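	/* clear the carrier last - an interrupt could reenable it otherwise */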
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
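			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */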
			enable_wol_mode(dev, 0);
		} else {
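			/* Restore PME enable bit unmolested */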
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}

static void natsemi_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev (dev);
	pci_release_regions (pdev);
	iounmap(ioaddr);
	free_netdev (dev);
}

#ifdef CONFIG_PM

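/*
 * The ns83815 chip doesn't have explicit RxStop bits.
 * Kicking the Rx or Tx process means RxOn or TxOn on the respective
 * side of the nic, thus this function must be very careful:
 *
 * suspend/resume synchronization:
 * entry points:
 *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
 *   start_tx, ns_tx_timeout
 *
 * No function accesses the hardware without checking np->hands_off.
 *	the check occurs under spin_lock_irq(&np->lock);
 * exceptions:
 *	* netdev_ioctl: noncritical access.
 *	* netdev_open: cannot happen due to the device_detach
 *	* netdev_close: doesn't hurt.
 *	* netdev_timer: timer stopped by natsemi_suspend.
 *	* intr_handler: doesn't acquire the spinlock. suspend calls
 *		disable_irq() to enforce synchronization.
 *	* natsemi_poll: checks before reenabling interrupts.  suspend
 *		sets hands_off, disables interrupts and then waits with
 *		napi_disable().
 *
 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
 */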
static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

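		/* Update the error counts. */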
		__get_stats(dev);

		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;

			if (wol) {
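				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */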
				enable_wol_mode(dev, 0);
			} else {
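				/* Restore PME enable bit unmolested */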
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}

static int natsemi_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int ret = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		BUG_ON(!np->hands_off);
		ret = pci_enable_device(pdev);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"pci_enable_device() failed: %d\n", ret);
			goto out;
		}

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return ret;
}

#endif

static struct pci_driver natsemi_driver = {
	.name = DRV_NAME,
	.id_table = natsemi_pci_tbl,
	.probe = natsemi_probe1,
	.remove = natsemi_remove1,
#ifdef CONFIG_PM
	.suspend = natsemi_suspend,
	.resume = natsemi_resume,
#endif
};

static int __init natsemi_init_mod (void)
{
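	/* when a module, this is printed whether or not devices are found in probe */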
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}

static void __exit natsemi_exit_mod (void)
{
	pci_unregister_driver (&natsemi_driver);
}

module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);