/*
 * natsemi.c: A Linux PCI Ethernet driver for the National Semiconductor
 * DP8381x series chips.
 *
 * Originally written by Donald Becker <becker@scyld.com>; 2.4.x kernel
 * port by Jeff Garzik and Tjeerd Mulder. Distributed under the GPL (see
 * the MODULE_* tags below).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#define DRV_NAME	"natsemi"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"Sept 11, 2006"

#define RX_OFFSET	2	/* rx buffer offset, so the IP header lands aligned */

/* Message-level flags enabled by default (see linux/netdevice.h for the
 * NETIF_MSG_* definitions); overridden by the 'debug' module parameter. */
#define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
				 NETIF_MSG_LINK		| \
				 NETIF_MSG_WOL		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
static int debug = -1;

static int mtu;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 * This chip uses a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 100;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme: packets
 * shorter than this are copied into a freshly allocated skb (0 disables). */
static int rx_copybreak;

static int dspcfg_workaround = 1;

/* Used to pass the media type, etc.
 * Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
 * The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8	/* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];

/* Operational parameters that are set at compile time. */

/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used, min 4. */
#define RX_RING_SIZE	32

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define NATSEMI_HW_TIMEOUT	400
#define NATSEMI_TIMER_FREQ	(5*HZ)
#define NATSEMI_PG0_NREGS	64
#define NATSEMI_RFDR_NREGS	8
#define NATSEMI_PG1_NREGS	4
#define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
				 NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER	1
#define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))

/* Buffer sizes */
#define NATSEMI_HEADERS		22	/* 2*mac,type,vlan,crc */
#define NATSEMI_PADDING		16	/* 2 bytes should be sufficient */
#define NATSEMI_LONGPKT		1518	/* limit for normal packets */
#define NATSEMI_RX_LIMIT	2046	/* maximum supported packet size */

static const char version[] =
	KERN_INFO DRV_NAME " dp8381x driver, version "
	DRV_VERSION ", " DRV_RELDATE "\n"
	"  originally by Donald Becker <becker@scyld.com>\n"
	"  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
MODULE_LICENSE("GPL");

module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x default debug level");
MODULE_PARM_DESC(rx_copybreak,
	"DP8381x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
MODULE_PARM_DESC(options,
	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");

#define PHYID_AM79C874	0x0022561b

/* AM79C874-specific MII registers and bits */
enum {
	MII_MCTRL	= 0x15,		/* mode control register */
	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
};

enum {
	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
};

/* array of board data directly indexed by pci_tbl[x].driver_data */
static struct {
	const char *name;
	unsigned long flags;
	unsigned int eeprom_size;
} natsemi_pci_info[] = {
	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
	{ "NatSemi DP8381[56]", 0, 24 },
};

static const struct pci_device_id natsemi_pci_tbl[] = {
	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	ChipCmd			= 0x00,
	ChipConfig		= 0x04,
	EECtrl			= 0x08,
	PCIBusCfg		= 0x0C,
	IntrStatus		= 0x10,
	IntrMask		= 0x14,
	IntrEnable		= 0x18,
	IntrHoldoff		= 0x1C,	/* DP83816 only */
	TxRingPtr		= 0x20,
	TxConfig		= 0x24,
	RxRingPtr		= 0x30,
	RxConfig		= 0x34,
	ClkRun			= 0x3C,
	WOLCmd			= 0x40,
	PauseCmd		= 0x44,
	RxFilterAddr		= 0x48,
	RxFilterData		= 0x4C,
	BootRomAddr		= 0x50,
	BootRomData		= 0x54,
	SiliconRev		= 0x58,
	StatsCtrl		= 0x5C,
	StatsData		= 0x60,
	RxPktErrs		= 0x60,
	RxMissed		= 0x68,
	RxCRCErrs		= 0x64,
	BasicControl		= 0x80,
	BasicStatus		= 0x84,
	AnegAdv			= 0x90,
	AnegPeer		= 0x94,
	PhyStatus		= 0xC0,
	MIntrCtrl		= 0xC4,
	MIntrStatus		= 0xC8,
	PhyCtrl			= 0xE4,

	/* These registers sit on a separate page of the register space;
	 * their meaning depends on the value written to PGSEL. */
	PGSEL			= 0xCC,
	PMDCSR			= 0xE4,
	TSTDAT			= 0xFC,
	DSPCFG			= 0xF4,
	SDCFG			= 0xF8
};

#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
#define TSTDAT_VAL	0x0
#define DSPCFG_VAL	0x5040
#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
#define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */

/* misc PCI space registers */
enum pci_register_offsets {
	PCIPM		= 0x44,
};

enum ChipCmd_bits {
	ChipReset	= 0x100,
	RxReset		= 0x20,
	TxReset		= 0x10,
	RxOff		= 0x08,
	RxOn		= 0x04,
	TxOff		= 0x02,
	TxOn		= 0x01,
};

enum ChipConfig_bits {
	CfgPhyDis	= 0x200,
	CfgPhyRst	= 0x400,
	CfgExtPhy	= 0x1000,
	CfgAnegEnable	= 0x2000,
	CfgAneg100	= 0x4000,
	CfgAnegFull	= 0x8000,
	CfgAnegDone	= 0x8000000,
	CfgFullDuplex	= 0x20000000,
	CfgSpeed100	= 0x40000000,
	CfgLink		= 0x80000000,
};

enum EECtrl_bits {
	EE_ShiftClk	= 0x04,
	EE_DataIn	= 0x01,
	EE_ChipSelect	= 0x08,
	EE_DataOut	= 0x02,
	MII_Data	= 0x10,
	MII_Write	= 0x20,
	MII_ShiftClk	= 0x40,
};

enum PCIBusCfg_bits {
	EepromReload	= 0x4,
};

/* Bits in the interrupt status/mask registers. */
enum IntrStatus_bits {
	IntrRxDone	= 0x0001,
	IntrRxIntr	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrRxEarly	= 0x0008,
	IntrRxIdle	= 0x0010,
	IntrRxOverrun	= 0x0020,
	IntrTxDone	= 0x0040,
	IntrTxIntr	= 0x0080,
	IntrTxErr	= 0x0100,
	IntrTxIdle	= 0x0200,
	IntrTxUnderrun	= 0x0400,
	StatsMax	= 0x0800,
	SWInt		= 0x1000,
	WOLPkt		= 0x2000,
	LinkChange	= 0x4000,
	IntrHighBits	= 0x8000,
	RxStatusFIFOOver = 0x10000,
	IntrPCIErr	= 0xf00000,
	RxResetDone	= 0x1000000,
	TxResetDone	= 0x2000000,
	IntrAbnormalSummary = 0xCD20,
};

/* Default interrupt mask: rx done/error/overrun, tx done/error/underrun,
 * statistics overflow, link change, high bits, rx status FIFO overrun,
 * and the PCI error conditions (see the bit values above). */
#define DEFAULT_INTR 0x00f1cd65

enum TxConfig_bits {
	TxDrthMask	= 0x3f,
	TxFlthMask	= 0x3f00,
	TxMxdmaMask	= 0x700000,
	TxMxdma_512	= 0x0,
	TxMxdma_4	= 0x100000,
	TxMxdma_8	= 0x200000,
	TxMxdma_16	= 0x300000,
	TxMxdma_32	= 0x400000,
	TxMxdma_64	= 0x500000,
	TxMxdma_128	= 0x600000,
	TxMxdma_256	= 0x700000,
	TxCollRetry	= 0x800000,
	TxAutoPad	= 0x10000000,
	TxMacLoop	= 0x20000000,
	TxHeartIgn	= 0x40000000,
	TxCarrierIgn	= 0x80000000
};

/* Tx thresholds, in units of 32 bytes:
 * FLTH (fill threshold): pull the next packet from memory once 512 bytes
 * of FIFO space are free.
 * DRTH (drain threshold): start transmitting once 64 bytes are in the
 * FIFO; on a tx underrun netdev_error() raises it by TX_DRTH_VAL_INC,
 * up to TX_DRTH_VAL_LIMIT. */
#define TX_FLTH_VAL		((512/32) << 8)
#define TX_DRTH_VAL_START	(64/32)
#define TX_DRTH_VAL_INC		2
#define TX_DRTH_VAL_LIMIT	(1472/32)

enum RxConfig_bits {
	RxDrthMask	= 0x3e,
	RxMxdmaMask	= 0x700000,
	RxMxdma_512	= 0x0,
	RxMxdma_4	= 0x100000,
	RxMxdma_8	= 0x200000,
	RxMxdma_16	= 0x300000,
	RxMxdma_32	= 0x400000,
	RxMxdma_64	= 0x500000,
	RxMxdma_128	= 0x600000,
	RxMxdma_256	= 0x700000,
	RxAcceptLong	= 0x8000000,
	RxAcceptTx	= 0x10000000,
	RxAcceptRunt	= 0x40000000,
	RxAcceptErr	= 0x80000000
};
#define RX_DRTH_VAL		(128/8)

enum ClkRun_bits {
	PMEEnable	= 0x100,
	PMEStatus	= 0x8000,
};

enum WolCmd_bits {
	WakePhy		= 0x1,
	WakeUnicast	= 0x2,
	WakeMulticast	= 0x4,
	WakeBroadcast	= 0x8,
	WakeArp		= 0x10,
	WakePMatch0	= 0x20,
	WakePMatch1	= 0x40,
	WakePMatch2	= 0x80,
	WakePMatch3	= 0x100,
	WakeMagic	= 0x200,
	WakeMagicSecure	= 0x400,
	SecureHack	= 0x100000,
	WokePhy		= 0x400000,
	WokeUnicast	= 0x800000,
	WokeMulticast	= 0x1000000,
	WokeBroadcast	= 0x2000000,
	WokeArp		= 0x4000000,
	WokePMatch0	= 0x8000000,
	WokePMatch1	= 0x10000000,
	WokePMatch2	= 0x20000000,
	WokePMatch3	= 0x40000000,
	WokeMagic	= 0x80000000,
	WakeOptsSummary	= 0x7ff
};

enum RxFilterAddr_bits {
	RFCRAddressMask		= 0x3ff,
	AcceptMulticast		= 0x00200000,
	AcceptMyPhys		= 0x08000000,
	AcceptAllPhys		= 0x10000000,
	AcceptAllMulticast	= 0x20000000,
	AcceptBroadcast		= 0x40000000,
	RxFilterEnable		= 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn	= 0x1,
	StatsFreeze	= 0x2,
	StatsClear	= 0x4,
	StatsStrobe	= 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn	= 0x2,
};

enum PhyCtrl_bits {
	PhyAddrMask	= 0x1f,
};

#define PHY_ADDR_NONE		32
#define PHY_ADDR_INTERNAL	1

/* values we might find in the silicon revision register */
#define SRR_DP83815_C		0x0302
#define SRR_DP83815_D		0x0403
#define SRR_DP83816_A4		0x0504
#define SRR_DP83816_A5		0x0505

/* The Rx and Tx buffer descriptors. Using only 32 bit fields simplifies
 * conversion to big-endian architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 cmd_status;
	__le32 addr;
	__le32 software_use;
};

/* Bits in netdev_desc.cmd_status */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};

struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;
	struct napi_struct napi;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack */
	unsigned int rx_buf_sz;
	int oom;
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* Don't pay attention to the reported link state */
	int ignore_phy;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	int dspcfg_workaround;
	/* parms saved in ethtool format */
	u16 speed;		/* The forced speed, 10Mb, 100Mb */
	u8 duplex;		/* Duplex, half or full */
	u8 autoneg;		/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;	/* NWay media advertisement */
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;
	/* EEPROM data */
	int eeprom_size;
};

static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct napi_struct *napi, int budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd);
static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
static const struct ethtool_ops ethtool_ops;
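
/* NATSEMI_ATTR declares the sysfs show/set handlers for a driver knob and
 * wraps them in a 0644 DEVICE_ATTR; NATSEMI_CREATE_FILE/NATSEMI_REMOVE_FILE
 * register and unregister the attribute on the PCI device. */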
#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
	struct device_attribute *attr, char *buf); \
	static ssize_t natsemi_set_##_name(struct device *dev, \
		struct device_attribute *attr, \
		const char *buf, size_t count); \
	static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	device_remove_file(&_dev->dev, &dev_attr_##_name)

NATSEMI_ATTR(dspcfg_workaround);

static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
}

static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	/* Find out the new setting */
	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1) ||
		 !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}

static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;
}

static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

	/*
	 * The internal phy is visible on the external mii bus. Therefore we
	 * must move it away before we can send commands to an external phy.
	 * There are two addresses we must avoid:
	 * - the address on the external phy that is used for transmission.
	 * - the address that we want to access. User space can access phys
	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from
	 *   the phy that is used for transmission.
	 */
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);
	udelay(1);
}

static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp		= mdio_read(dev, MII_BMCR);
	np->speed	= (tmp & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
	np->duplex	= (tmp & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	np->autoneg	= (tmp & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	np->advertising = mdio_read(dev, MII_ADVERTISE);

	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE) ?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF)) ?
			  "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL)) ?
			  "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);
}

static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= natsemi_change_mtu,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= ns_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= natsemi_poll_controller,
#endif
};
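
/* natsemi_probe1 - initialize one freshly enumerated DP8381x: map the
 * registers, read the MAC address from the EEPROM, pick the initial port
 * (internal TP vs. external MII), apply module options and register the
 * net_device. */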
static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1;	/* registers are in BAR 1 */
	int prev_eedata;
	u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	/* natsemi has a non-standard PM control register
	 * in PCI config space.  Some boards apparently need
	 * to be brought to D0 in this manner.
	 */
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		/* D0 state, disable PME assertion */
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_ioremap;
	}

	/* Work around the dropped serial bit. */
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		dev->dev_addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;

	/* Initial port:
	 * - If configured to ignore the PHY set up for external.
	 * - If the nic was configured to use an external phy and if find_mii
	 *   finds a phy: use external port, first phy that replies.
	 * - Otherwise: internal port.
	 * Note that the phy address for the internal phy doesn't matter:
	 * The address would be used to access a phy over the mii bus, but
	 * the internal phy is accessed through mapped registers.
	 */
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;

	/* Reset the chip to erase previous misconfiguration. */
	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);
		/* If we're ignoring the PHY it doesn't matter if we can't
		 * find one. */
		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	/* The lower four bits are the media type. */
	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d\n",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	/* MTU range: 64 - 2024 */
	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	/* save the silicon revision for later querying */
	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
				pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
		       "(%s), %pM, IRQ %d",
		       dev->name, natsemi_pci_info[chip_idx].name,
		       (unsigned long long)iostart, pci_name(np->pci_dev),
		       dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_ioremap:
	pci_release_regions(pdev);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
 * The EEPROM code is for common 93c06/46 EEPROMs with 6-bit addresses. */

/* Delay between EEPROM clock transitions.
 * No extra delay is needed with 33MHz PCI, but future 66MHz access may
 * need a delay. */
#define eeprom_delay(ee_addr)	readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
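
/* eeprom_read - bit-bang one 16-bit word out of the serial EEPROM.
 * The read command (start bit, opcode and 6-bit address) is clocked out
 * MSB first; the 16 result bits are then clocked back in, LSB first with
 * respect to the returned value. */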
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}

/* MII transceiver control section.
 * Read and write MII registers using software-generated serial MDIO
 * protocol. See the MII specifications or DP83840A data sheet for details.
 *
 * The maximum data clock rate is 2.5 MHz. The timing is easily met by the
 * PCI read cycle used as the delay. */
#define mii_delay(ioaddr)	readl(ioaddr + EECtrl)

static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data) ? 1 : 0;
}

static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i) ? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}
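
/* miiport_read/miiport_write bit-bang MDIO frames: 32 one-bits of preamble,
 * then the start and opcode bits, PHY address and register number, and a
 * turnaround before the 16 data bits. */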
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		return 0;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}

static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST, OP, AAAAA, RRRRR, TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}
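
/* The DP8381x gives access to two MII interfaces: the internal PHY's
 * registers are memory-mapped starting at BasicControl (reg << 2), while
 * an external PHY is reached by bit-banging MDIO via miiport_read/write. */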
static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}

static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
		miiport_write(dev, np->phy_addr_external, reg, data);
}
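
/* init_phy_fixup - restore PHY settings that a chip or PHY reset clobbers:
 * re-apply autonegotiation or forced speed/duplex, model-specific fixups,
 * and (internal PHY only) the undocumented DSPCFG/SDCFG analog workaround,
 * retrying until the DSPCFG value sticks. */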
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
		+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  Also, we only want to do
	   this for rev C of the chip, but engineers at NSC recommend always
	   setting them.  If you don't, you get errors on some
	   autonegotiations that make the device unusable. */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		int dspcfg;
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto negotiation are things we would like to know about.
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
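
/* switch_port_external - select the external MII port. Returns 1 if the
 * port was switched (so the caller can switch back later), 0 if the
 * external port was already active. */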
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch to the external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reinit the phy fixup, it got lost during power down. Move the
	 * internal phy out of the way first so that the external phy at
	 * np->phy_addr_external can be addressed. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}

static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg & CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to the internal phy */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy and wait for the reset to complete */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}

/* Scan for a PHY on the external mii bus.
 * There are two tricky points:
 * - the internal phy must be moved away before scanning.
 * - an external phy could be using the internal phy address: therefore
 *   only addresses 1..31 are scanned, address 0 could be the internal phy.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the mii bus for phys that respond with a sane BMSR value;
	 * if the loop runs to completion, i == 32 == PHY_ADDR_NONE. */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}

/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
#define RFCR_RESET_SAVE 0xf8500000
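
/* natsemi_reset - soft-reset the chip, preserving the register fields
 * masked above plus the perfect-match address and SecureOn password
 * stored in the rx filter RAM. */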
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on the external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	/* restore SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	writel(rfcr, ioaddr + RxFilterAddr);
}

static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
			dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
			dev->name, i*15);
	}
}

static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}

static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem *ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
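
/* netdev_open - bring the interface up: reset the chip, grab the IRQ,
 * allocate and initialize the descriptor rings, program the MAC address
 * and rx filter, and start the media-monitoring timer. */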
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Reset the chip, just in case. */
	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	add_timer(&np->timer);

	return 0;
}
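
/* do_cable_magic / undo_cable_magic - DP83815 (revisions before DP83816 A5)
 * workaround for unstable 100 Mbit TP links over short cables; locks the DSP
 * coefficient in DSPCFG when the auto-adapted value looks bad. */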
static void do_cable_magic(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	/*
	 * 100 MBit links with short cables can trip an issue with the chip.
	 * The problem manifests as lots of CRC errors and/or flickering
	 * activity LED while idle.  This process is based on instructions
	 * from engineers at National.
	 */
	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
		u16 data;

		writew(1, ioaddr + PGSEL);
		/*
		 * coefficient visibility should already be enabled via
		 * DSPCFG | 0x1000
		 */
		data = readw(ioaddr + TSTDAT) & 0xff;
		/*
		 * the value must be negative, and within certain values
		 * (these values all come from National)
		 */
		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
			np = netdev_priv(dev);

			/* the bug has been triggered - fix the coefficient */
			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
			/* lock the value */
			data = readw(ioaddr + DSPCFG);
			np->dspcfg = data | DSPCFG_LOCK;
			writew(np->dspcfg, ioaddr + DSPCFG);
		}
		writew(0, ioaddr + PGSEL);
	}
}

static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);
	/* make sure the lock bit is clear */
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}
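
/* check_link - poll the PHY link status and propagate duplex changes into
 * the MAC's tx/rx configuration; called from the timer, the LinkChange
 * interrupt and init_registers. */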
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
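
/* init_registers - program ring pointers, tx/rx thresholds, interrupt mask
 * and rx filter, then start the receiver and transmitter. */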
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex; check_link will update to the correct
	 * settings.
	 *
	 * DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: up to 256 byte bursts (MXDMA must be <= FLTH)
	 * ECRETRY=1, ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
				TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}

/* The media-monitoring timer runs every NATSEMI_TIMER_FREQ jiffies.
 * Its tasks, under np->lock:
 * 1) check for link changes (both for PORT_TP and the external MII port).
 * 2) on PORT_TP, verify that DSPCFG still holds the value written by
 *    init_phy_fixup; if a stray phy reset wiped it, stop the nic and
 *    reinitialize the registers (the dspcfg_workaround).
 * 3) if the rx path ran out of memory, retry refill_rx and restart rx.
 */
static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nic-death: verify that DSPCFG is still intact */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* hurry back, the queue is still stopped */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for this case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}

	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}

static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}

static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev);
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
		&np->ring_dma, GFP_KERNEL);
	if (!np->rx_ring)
		return -ENOMEM;
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
	return 0;
}
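
/* refill_rx - allocate, map and post skbs for all unused rx descriptors;
 * sets np->oom if no buffer at all could be posted to the ring. */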
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
							   skb->data, buflen,
							   DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break; /* Better luck next round. */
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}

static void set_bufsize(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
}
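
/* init_ring - link the tx and rx descriptors into circular rings, size the
 * rx buffers from the MTU and fill the rx ring. */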
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}

static void drain_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			dev->stats.tx_dropped++;
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void drain_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->rx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
					 buflen + NATSEMI_PADDING,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
}

static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}

static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	dma_free_coherent(&np->pci_dev->dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}

static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* RX Ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}

static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* drain TX ring */
	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i=0;i<TX_RING_SIZE;i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}
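
/* start_tx - map the skb, fill in the next free tx descriptor and kick the
 * transmitter; reclaims finished descriptors and stops the queue when
 * TX_QUEUE_LEN - 1 packets are outstanding. */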
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
		/* Free the original skb. */
		dev_consume_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}

/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges, so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled). */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
		       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}

/* This is the NAPI poll routine.  As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* netdev_rx() may read IntrStatus again if the RX state
		 * machine falls over so do it first. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete_done(napi, work_done);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}

/* This routine is logically part of the interrupt handler, but separated
 * for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* If the driver owns the next entry it's a new packet. Send it up. */
	while (desc_status < 0) { /* e.g. & DescOwn */
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				/* The RX state machine has probably
				 * locked up beneath us.  Follow the
				 * reset procedure documented in
				 * AN-1287. */

				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				/* We'll enable RX on exit from this
				 * function. */
				break;

			} else {
				/* There was an error. */
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
			/* if this packet is longer than the rx buffer then
			 * discard it. */
		} else {
			struct sk_buff *skb;
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				/* align the IP header */
				skb_reserve(skb, RX_OFFSET);
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->rx_dma[entry],
							buflen,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->rx_dma[entry],
							   buflen,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->rx_dma[entry],
						 buflen + NATSEMI_PADDING,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	/* Restart Rx engine if stopped. */
	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}
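
/* netdev_error - handle the "abnormal" interrupt sources: link changes,
 * statistics overflow, tx underrun (bump the drain threshold), wake-up
 * packets, rx status FIFO overrun, and PCI bus errors. */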
2396
2397static void netdev_error(struct net_device *dev, int intr_status)
2398{
2399 struct netdev_private *np = netdev_priv(dev);
2400 void __iomem * ioaddr = ns_ioaddr(dev);
2401
2402 spin_lock(&np->lock);
2403 if (intr_status & LinkChange) {
2404 u16 lpa = mdio_read(dev, MII_LPA);
2405 if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
2406 netif_msg_link(np)) {
2407 printk(KERN_INFO
2408 "%s: Autonegotiation advertising"
2409 " %#04x partner %#04x.\n", dev->name,
2410 np->advertising, lpa);
2411 }
2412
2413
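		/* read MII int status to clear the flag */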
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
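	/* fold the hardware statistics counters into dev->stats */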
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
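	/* After a Tx underrun, raise the Tx drain threshold until it
	 * reaches its limit. */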
	if (intr_status & IntrTxUnderrun) {
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);

		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}

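	/* A PCI bus error could have hit either data path: charge both. */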
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}

static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);

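	/* The chip only reports the frames it dropped silently;
	 * accumulate them here. */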
	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

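	/* Only read the chip's counters while the interface is up and
	 * nothing else owns the hardware. */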
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &dev->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
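/* Polling 'interrupt' for netpoll users such as netconsole: run the
 * interrupt handler with the IRQ line masked. */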
static void natsemi_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	intr_handler(irq, dev);
	enable_irq(irq);
}
#endif

#define HASH_TABLE 0x200
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64];
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

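		/* Build the 512-bit multicast hash filter: the bit index
		 * is taken from the top nine bits of the Ethernet CRC of
		 * each address. */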
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;

			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
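		/* The filter RAM is written 16 bits at a time through
		 * the RxFilterAddr/RxFilterData pair. */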
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
}

static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

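	/* synchronized against open: rtnl_lock() is held by the caller */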
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);

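		/* stop engines */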
		natsemi_stop_rxtx(dev);

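		/* drain rx queue */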
		drain_rx(dev);

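		/* change buffers */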
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);

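		/* restart engines */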
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->eeprom_size;
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;

	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;

	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);

	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);

	np->msg_enable = val;
}

static int nway_reset(struct net_device *dev)
{
	int tmp;
	int r = -EINVAL;

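	/* if autoneg is off, it's an error */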
	tmp = mdio_read(dev, MII_BMCR);
	if (tmp & BMCR_ANENABLE) {
		tmp |= (BMCR_ANRESTART);
		mdio_write(dev, MII_BMCR, tmp);
		r = 0;
	}
	return r;
}

static u32 get_link(struct net_device *dev)
{
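	/* The link status bit is latched low until read, so read
	 * twice to get the current state. */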
	mdio_read(dev, MII_BMSR);
	return (mdio_read(dev, MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815 << 16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf + eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

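	/* translate the ethtool WOL options to the chip's bitmasks */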
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
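	/* SecureOn (magic packet with password) needs revision D or later */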
	if (np->srr >= SRR_DP83815_D) {
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}

static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
		| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
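		/* SecureOn is only supported on revision D and later */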
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

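	/* translate from the chip's bitmasks */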
	if (regval & WakePhy)
		*cur |= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure)
		*cur |= WAKE_MAGICSECURE;

	return 0;
}

static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

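	/* disable the rx filter while the SecureOn password is written */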
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

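	/* write the SecureOn password: filter RAM offsets 0xa, 0xc, 0xe */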
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

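	/* re-enable the rx filter */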
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}

static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

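	/* read the SecureOn password back out of the filter RAM */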
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

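	/* restore the original filter address */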
	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}

static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;
	u32 tmp;

	ecmd->base.port = dev->if_port;
	ecmd->base.speed = np->speed;
	ecmd->base.duplex = np->duplex;
	ecmd->base.autoneg = np->autoneg;
	advertising = 0;

	if (np->advertising & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->base.phy_address = np->phy_addr_external;
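	/*
	 * The address of the external phy is reported even while the
	 * internal transceiver is in use: it is also what the MII
	 * ioctls hand out via SIOCGMIIPHY.
	 */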
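	/* fold the active port into the advertised link modes */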
	switch (ecmd->base.port) {
	default:
	case PORT_TP:
		advertising |= ADVERTISED_TP;
		break;
	case PORT_MII:
		advertising |= ADVERTISED_MII;
		break;
	case PORT_FIBRE:
		advertising |= ADVERTISED_FIBRE;
		break;
	}

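	/* if autoneg is on, report the negotiated speed and duplex */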
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->base.speed = SPEED_100;
		else
			ecmd->base.speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->base.duplex = DUPLEX_FULL;
		else
			ecmd->base.duplex = DUPLEX_HALF;
	}

	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
						advertising);

	return 0;
}

static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						ecmd->link_modes.advertising);

	if (ecmd->base.port != PORT_TP &&
	    ecmd->base.port != PORT_MII &&
	    ecmd->base.port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		if ((advertising & (ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = ecmd->base.speed;

		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->base.duplex != DUPLEX_HALF &&
		    ecmd->base.duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

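	/*
	 * If we're ignoring the PHY, then autoneg and the internal
	 * transceiver are really not valid selections.
	 */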
	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
			       ecmd->base.port == PORT_TP))
		return -EINVAL;

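	/*
	 * Commit the settings: PORT_TP selects the internal transceiver,
	 * PORT_MII and PORT_FIBRE the external one (see the switch_port_*
	 * calls below).
	 */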
	dev->if_port = ecmd->base.port;
	np->autoneg = ecmd->base.autoneg;
	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
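		/* advertise only what has been requested */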
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed = ecmd->base.speed;
		np->duplex = ecmd->base.duplex;

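		/* a forced half-duplex setting also clears the
		 * full_duplex flag */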
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

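	/* select the transceiver that matches the requested port */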
	if (ecmd->base.port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

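	/* reprogram the PHY with the new settings and re-check the link */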
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}

static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

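	/* read non-MII page 0 registers */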
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

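	/* read the current MII registers */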
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

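	/* read the 'magic' registers from page 1 */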
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

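	/* read the RFCR-indexed receive filter registers */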
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

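	/* IntrStatus (rbuf[4]) is clear-on-read, so the dump above may
	 * have eaten an interrupt that IntrMask (rbuf[5]) had enabled;
	 * warn if so. */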
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}

#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )

static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
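		/* The EEPROM itself stores data bit-swapped, but
		 * eeprom_read reads it back "sane", so swap again to
		 * return the raw EEPROM contents. */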
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = np->phy_addr_external;
		fallthrough;

	case SIOCGMIIREG:
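		/* The phy_id alone is not enough to uniquely identify
		 * the target, so direct the access at the currently
		 * selected transceiver. */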
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							  data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
						     data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
					   data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
				      data->reg_num & 0x1f,
				      data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

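	/* For WOL we must restart the rx process in silent mode.
	 * Writing 0 to RxRingPtr is only allowed while the rx process
	 * is stopped. */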
	writel(0, ioaddr + RxRingPtr);

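	/* read the WoL status to clear it */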
	readl(ioaddr + WOLCmd);

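	/* PME on, clear status */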
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

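	/* and restart the rx process */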
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
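		/* enable the WOL interrupt: only wake-up and
		 * link-change events remain unmasked */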
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}

static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

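	/*
	 * Shutdown sequence: stop the timer first, then disable the
	 * interrupt and set hands_off so that a late-running handler
	 * cannot touch the chip, and only then release the IRQ.
	 */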
	del_timer_sync(&np->timer);
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

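	/* Interrupt disabled, interrupt handler released, queue stopped,
	 * timer deleted, rtnl_lock held: all async codepaths that could
	 * access the chip are shut down. */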
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

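	/* freeze the statistics counters */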
	writel(StatsFreeze, ioaddr + StatsCtrl);

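	/* stop the chip's Tx and Rx processes */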
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;

		if (wol) {
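			/* restart the NIC in WOL mode;
			 * the nic must be stopped for this */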
			enable_wol_mode(dev, 0);
		} else {
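			/* restore the saved ClkRun/PME state */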
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}

static void natsemi_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev(dev);
	pci_release_regions(pdev);
	iounmap(ioaddr);
	free_netdev(dev);
}

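/*
 * Suspend/resume synchronization: all entry points that touch the
 * hardware check np->hands_off under np->lock before accessing the
 * chip. natsemi_suspend stops the timer, sets hands_off with the irq
 * disabled and then waits out NAPI with napi_disable(), so no async
 * codepath can reach the hardware while it is suspended.
 */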
static int __maybe_unused natsemi_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

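		/* update the error counts */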
		__get_stats(dev);

		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;

			if (wol) {
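				/* restart the NIC in WOL mode;
				 * the nic must be stopped for this */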
				enable_wol_mode(dev, 0);
			} else {
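				/* restore the saved ClkRun/PME state */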
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}

static int __maybe_unused natsemi_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		BUG_ON(!np->hands_off);

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}

static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);

static struct pci_driver natsemi_driver = {
	.name = DRV_NAME,
	.id_table = natsemi_pci_tbl,
	.probe = natsemi_probe1,
	.remove = natsemi_remove1,
	.driver.pm = &natsemi_pm_ops,
};

static int __init natsemi_init_mod(void)
{
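/* when a module, this is printed whether or not devices are found in probe */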
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}

static void __exit natsemi_exit_mod(void)
{
	pci_unregister_driver(&natsemi_driver);
}

module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);