/*
 * natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#define DRV_NAME "natsemi"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"

#define RX_OFFSET 2

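/* Default message-enable mask: report driver, link, wake-on-LAN and rx/tx
 * error events. The "debug" module parameter, if >= 0, overrides it with a
 * verbosity level. */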
#define NATSEMI_DEF_MSG	(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_WOL | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)
static int debug = -1;

static int mtu;

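/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   This chip uses a 512 element hash table based on the Ethernet CRC. */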
static const int multicast_filter_limit = 100;

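/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */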
static int rx_copybreak;

static int dspcfg_workaround = 1;

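/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'. */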
#define MAX_UNITS 8
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];

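/* Operational parameters that are set at compile time.
   Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert '%'<2^N> into a bit mask. */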
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10
#define RX_RING_SIZE 32

#define TX_TIMEOUT (2*HZ)

#define NATSEMI_HW_TIMEOUT 400
#define NATSEMI_TIMER_FREQ (5*HZ)
#define NATSEMI_PG0_NREGS 64
#define NATSEMI_RFDR_NREGS 8
#define NATSEMI_PG1_NREGS 4
#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
		       NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER 1
#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))

#define NATSEMI_HEADERS 22
#define NATSEMI_PADDING 16
#define NATSEMI_LONGPKT 1518
#define NATSEMI_RX_LIMIT 2046

static const char version[] =
	KERN_INFO DRV_NAME " dp8381x driver, version "
	DRV_VERSION ", " DRV_RELDATE "\n"
	"  originally by Donald Becker <becker@scyld.com>\n"
	"  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
MODULE_LICENSE("GPL");

module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x default debug level");
MODULE_PARM_DESC(rx_copybreak,
	"DP8381x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
MODULE_PARM_DESC(options,
	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
#define PHYID_AM79C874 0x0022561b

enum {
	MII_MCTRL = 0x15,
	MII_FX_SEL = 0x0001,
	MII_EN_SCRM = 0x0004,
};

enum {
	NATSEMI_FLAG_IGNORE_PHY = 0x1,
};
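/* array of board data directly indexed by pci_tbl[x].driver_data */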
static struct {
	const char *name;
	unsigned long flags;
	unsigned int eeprom_size;
} natsemi_pci_info[] = {
	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
	{ "NatSemi DP8381[56]", 0, 24 },
};

static const struct pci_device_id natsemi_pci_tbl[] = {
	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);

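/* Offsets to the device registers. */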
enum register_offsets {
	ChipCmd = 0x00,
	ChipConfig = 0x04,
	EECtrl = 0x08,
	PCIBusCfg = 0x0C,
	IntrStatus = 0x10,
	IntrMask = 0x14,
	IntrEnable = 0x18,
	IntrHoldoff = 0x1C,
	TxRingPtr = 0x20,
	TxConfig = 0x24,
	RxRingPtr = 0x30,
	RxConfig = 0x34,
	ClkRun = 0x3C,
	WOLCmd = 0x40,
	PauseCmd = 0x44,
	RxFilterAddr = 0x48,
	RxFilterData = 0x4C,
	BootRomAddr = 0x50,
	BootRomData = 0x54,
	SiliconRev = 0x58,
	StatsCtrl = 0x5C,
	StatsData = 0x60,
	RxPktErrs = 0x60,
	RxMissed = 0x68,
	RxCRCErrs = 0x64,
	BasicControl = 0x80,
	BasicStatus = 0x84,
	AnegAdv = 0x90,
	AnegPeer = 0x94,
	PhyStatus = 0xC0,
	MIntrCtrl = 0xC4,
	MIntrStatus = 0xC8,
	PhyCtrl = 0xE4,

	PGSEL = 0xCC,
	PMDCSR = 0xE4,
	TSTDAT = 0xFC,
	DSPCFG = 0xF4,
	SDCFG = 0xF8
};

#define PMDCSR_VAL 0x189c
#define TSTDAT_VAL 0x0
#define DSPCFG_VAL 0x5040
#define SDCFG_VAL 0x008c
#define DSPCFG_LOCK 0x20
#define DSPCFG_COEF 0x1000
#define TSTDAT_FIXED 0xe8

enum pci_register_offsets {
	PCIPM = 0x44,
};

enum ChipCmd_bits {
	ChipReset = 0x100,
	RxReset = 0x20,
	TxReset = 0x10,
	RxOff = 0x08,
	RxOn = 0x04,
	TxOff = 0x02,
	TxOn = 0x01,
};

enum ChipConfig_bits {
	CfgPhyDis = 0x200,
	CfgPhyRst = 0x400,
	CfgExtPhy = 0x1000,
	CfgAnegEnable = 0x2000,
	CfgAneg100 = 0x4000,
	CfgAnegFull = 0x8000,
	CfgAnegDone = 0x8000000,
	CfgFullDuplex = 0x20000000,
	CfgSpeed100 = 0x40000000,
	CfgLink = 0x80000000,
};

enum EECtrl_bits {
	EE_ShiftClk = 0x04,
	EE_DataIn = 0x01,
	EE_ChipSelect = 0x08,
	EE_DataOut = 0x02,
	MII_Data = 0x10,
	MII_Write = 0x20,
	MII_ShiftClk = 0x40,
};

enum PCIBusCfg_bits {
	EepromReload = 0x4,
};

enum IntrStatus_bits {
	IntrRxDone = 0x0001,
	IntrRxIntr = 0x0002,
	IntrRxErr = 0x0004,
	IntrRxEarly = 0x0008,
	IntrRxIdle = 0x0010,
	IntrRxOverrun = 0x0020,
	IntrTxDone = 0x0040,
	IntrTxIntr = 0x0080,
	IntrTxErr = 0x0100,
	IntrTxIdle = 0x0200,
	IntrTxUnderrun = 0x0400,
	StatsMax = 0x0800,
	SWInt = 0x1000,
	WOLPkt = 0x2000,
	LinkChange = 0x4000,
	IntrHighBits = 0x8000,
	RxStatusFIFOOver = 0x10000,
	IntrPCIErr = 0xf00000,
	RxResetDone = 0x1000000,
	TxResetDone = 0x2000000,
	IntrAbnormalSummary = 0xCD20,
};
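/* Interrupts enabled by default (0x00f1cd65): Rx done/error/overrun,
 * Tx done/error/underrun, statistics overflow, link change, the high-bits
 * summary, Rx status FIFO overrun and PCI errors. */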
#define DEFAULT_INTR 0x00f1cd65

enum TxConfig_bits {
	TxDrthMask = 0x3f,
	TxFlthMask = 0x3f00,
	TxMxdmaMask = 0x700000,
	TxMxdma_512 = 0x0,
	TxMxdma_4 = 0x100000,
	TxMxdma_8 = 0x200000,
	TxMxdma_16 = 0x300000,
	TxMxdma_32 = 0x400000,
	TxMxdma_64 = 0x500000,
	TxMxdma_128 = 0x600000,
	TxMxdma_256 = 0x700000,
	TxCollRetry = 0x800000,
	TxAutoPad = 0x10000000,
	TxMacLoop = 0x20000000,
	TxHeartIgn = 0x40000000,
	TxCarrierIgn = 0x80000000
};
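/* Tx thresholds are in units of 32 bytes: a 512 byte fill threshold, and a
 * drain threshold that starts at 64 bytes and is raised by TX_DRTH_VAL_INC
 * on each Tx underrun, up to a limit of 1472 bytes. */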
#define TX_FLTH_VAL ((512/32) << 8)
#define TX_DRTH_VAL_START (64/32)
#define TX_DRTH_VAL_INC 2
#define TX_DRTH_VAL_LIMIT (1472/32)

enum RxConfig_bits {
	RxDrthMask = 0x3e,
	RxMxdmaMask = 0x700000,
	RxMxdma_512 = 0x0,
	RxMxdma_4 = 0x100000,
	RxMxdma_8 = 0x200000,
	RxMxdma_16 = 0x300000,
	RxMxdma_32 = 0x400000,
	RxMxdma_64 = 0x500000,
	RxMxdma_128 = 0x600000,
	RxMxdma_256 = 0x700000,
	RxAcceptLong = 0x8000000,
	RxAcceptTx = 0x10000000,
	RxAcceptRunt = 0x40000000,
	RxAcceptErr = 0x80000000
};
#define RX_DRTH_VAL (128/8)

enum ClkRun_bits {
	PMEEnable = 0x100,
	PMEStatus = 0x8000,
};

enum WolCmd_bits {
	WakePhy = 0x1,
	WakeUnicast = 0x2,
	WakeMulticast = 0x4,
	WakeBroadcast = 0x8,
	WakeArp = 0x10,
	WakePMatch0 = 0x20,
	WakePMatch1 = 0x40,
	WakePMatch2 = 0x80,
	WakePMatch3 = 0x100,
	WakeMagic = 0x200,
	WakeMagicSecure = 0x400,
	SecureHack = 0x100000,
	WokePhy = 0x400000,
	WokeUnicast = 0x800000,
	WokeMulticast = 0x1000000,
	WokeBroadcast = 0x2000000,
	WokeArp = 0x4000000,
	WokePMatch0 = 0x8000000,
	WokePMatch1 = 0x10000000,
	WokePMatch2 = 0x20000000,
	WokePMatch3 = 0x40000000,
	WokeMagic = 0x80000000,
	WakeOptsSummary = 0x7ff
};

enum RxFilterAddr_bits {
	RFCRAddressMask = 0x3ff,
	AcceptMulticast = 0x00200000,
	AcceptMyPhys = 0x08000000,
	AcceptAllPhys = 0x10000000,
	AcceptAllMulticast = 0x20000000,
	AcceptBroadcast = 0x40000000,
	RxFilterEnable = 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn = 0x1,
	StatsFreeze = 0x2,
	StatsClear = 0x4,
	StatsStrobe = 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn = 0x2,
};

enum PhyCtrl_bits {
	PhyAddrMask = 0x1f,
};

#define PHY_ADDR_NONE 32
#define PHY_ADDR_INTERNAL 1

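/* values we might find in the silicon revision register */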
#define SRR_DP83815_C 0x0302
#define SRR_DP83815_D 0x0403
#define SRR_DP83816_A4 0x0504
#define SRR_DP83816_A5 0x0505

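/* The Rx and Tx buffer descriptors. */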
struct netdev_desc {
	__le32 next_desc;
	__le32 cmd_status;
	__le32 addr;
	__le32 software_use;
};

enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};

struct netdev_private {
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;

	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];

	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;
	struct napi_struct napi;

	struct timer_list timer;

	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;

	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;

	unsigned int rx_buf_sz;
	int oom;

	u32 intr_status;

	int hands_off;

	int ignore_phy;

	int mii;
	int phy_addr_external;
	unsigned int full_duplex;

	u32 cur_rx_mode;
	u32 rx_filter[16];

	u32 tx_config, rx_config;

	u32 SavedClkRun;

	u32 srr;

	u16 dspcfg;
	int dspcfg_workaround;

	u16 speed;
	u8 duplex;
	u8 autoneg;

	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;

	int eeprom_size;
};

static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct napi_struct *napi, int budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd);
static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
static const struct ethtool_ops ethtool_ops;

#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
		struct device_attribute *attr, char *buf); \
static ssize_t natsemi_set_##_name(struct device *dev, \
		struct device_attribute *attr, \
		const char *buf, size_t count); \
static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	device_remove_file(&_dev->dev, &dev_attr_##_name)

NATSEMI_ATTR(dspcfg_workaround);

static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
}

static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1) ||
		 !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}

static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;
}

static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

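	/* The internal PHY answers on all MII addresses; park it on an
	 * address that collides neither with the external PHY nor with
	 * the address currently being probed. */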
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);
	udelay(1);
}

static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	tmp = mdio_read(dev, MII_BMCR);
	np->speed = (tmp & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
	np->duplex = (tmp & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	np->autoneg = (tmp & BMCR_ANENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	np->advertising = mdio_read(dev, MII_ADVERTISE);

	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE) ?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF)) ?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL)) ?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);
}

static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= natsemi_change_mtu,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= ns_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= natsemi_poll_controller,
#endif
};

static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1;
	u8 addr[ETH_ALEN];
	int prev_eedata;
	u32 tmp;

#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pcim_enable_device(pdev);
	if (i)
		return i;

	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_pci_request_regions;
	}

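	/* The station address is scattered across EEPROM words 6..9 with
	 * a one-bit offset; reassemble it byte by byte. */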
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}
	eth_hw_addr_set(dev, addr);

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;

	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;

	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);

		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;

	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
			pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
			"(%s), %pM, IRQ %d",
			dev->name, natsemi_pci_info[chip_idx].name,
			(unsigned long long)iostart, pci_name(np->pci_dev),
			dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}
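/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
 * The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
 * eeprom_delay() is just a read-back that flushes the write and paces the
 * clock transitions; no further delay is needed. */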
#define eeprom_delay(ee_addr) readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
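/* MII transceiver control section.
 * The 83815 series has an internal transceiver whose management registers
 * are memory-mapped; an external PHY is reached by bit-banging the MII
 * lines through the EECtrl register. */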
#define mii_delay(ioaddr) readl(ioaddr + EECtrl)

static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data) ? 1 : 0;
}

static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i) ? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}

static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	mii_send_bits (dev, 0xffffffff, 32);

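	/* command frame: ST(2) OP(2, read) PHY(5) REG(5) = 14 bits; the
	 * turnaround bit is then sampled via mii_getbit(). */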
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);

	if (mii_getbit (dev))
		return 0;

	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}

	mii_getbit (dev);
	return retval;
}

static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	mii_send_bits (dev, 0xffffffff, 32);

	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);

	mii_getbit (dev);
}

static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

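	/* The internal transceiver's MII registers are memory-mapped at
	 * BasicControl and up; an external PHY goes through the bit-banged
	 * MII port. */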
	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}

static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
		miiport_write(dev, np->phy_addr_external, reg, data);
}

static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);
	udelay(1);

	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
		+ mdio_read(dev, MII_PHYSID2);

	switch (np->mii) {
	case PHYID_AM79C874:
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;

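	/* Rewrite the analog-configuration registers (DSPCFG and friends)
	 * on page 1 of the internal PHY; the write does not always stick,
	 * so retry until the read-back matches. */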
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		int dspcfg;
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C) ?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i == NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}

	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}

static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
			dev->name);
	}

	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

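	/* Park the internal PHY on a free MII address so that the external
	 * PHY at phy_addr_external can be reached. */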
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}

static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg & CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
			dev->name);
	}

	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i == NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}

	init_phy_fixup(dev);

	return 1;
}
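/* Scan for a PHY on the external MII bus. The bus is only reachable while
 * the port is switched to the external transceiver, so switch over for the
 * scan and switch back afterwards if we did the switching. */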
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	did_switch = switch_port_external(dev);

	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
					pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}

	if (did_switch)
		switch_port_internal(dev);
	return i;
}
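/* Register bits that natsemi_reset() preserves across a chip reset:
 * CFG bits [13:16] [18:23], WCSR bits [0:4] [9:10],
 * RFCR bits [20] [22] [27:31]. */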
#define CFG_RESET_SAVE 0xfde000
#define WCSR_RESET_SAVE 0x61f
#define RFCR_RESET_SAVE 0xf8500000

static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

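	/* A reset clears most registers; save the ones that must survive:
	 * chip config, wake-on-LAN setup, rx filter mode, the station
	 * address (pattern match 0) and the SecureOn password. */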
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;

	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;

	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;

	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}

	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	writel(ChipReset, ioaddr + ChipCmd);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;

	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);

	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);

	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;

	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}

	writel(rfcr, ioaddr + RxFilterAddr);
}

static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
			dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
			dev->name, i*15);
	}
}

static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}

static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem *ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for (i = 0; i < NATSEMI_HW_TIMEOUT; i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i == NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);

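	/* Load the station address into the rx filter's perfect-match
	 * registers. */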
	for (i = 0; i < 3; i++) {
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	add_timer(&np->timer);

	return 0;
}

static void do_cable_magic(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

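	/* 100 MBit links with short cables can trip up the chip's adaptive
	 * equalizer; if its coefficient looks out of range, freeze it at a
	 * known-good value (DSPCFG_LOCK). */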
	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
		u16 data;

		writew(1, ioaddr + PGSEL);

		data = readw(ioaddr + TSTDAT) & 0xff;

		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
			writew(TSTDAT_FIXED, ioaddr + TSTDAT);

			data = readw(ioaddr + DSPCFG);
			np->dspcfg = data | DSPCFG_LOCK;
			writew(np->dspcfg, ioaddr + DSPCFG);
		}
		writew(0, ioaddr + PGSEL);
	}
}

static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);

	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}

static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	if (np->ignore_phy)
		goto propagate_state;
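	/* The link status field is latched: it remains low after a
	 * temporary link failure until it's read. We need the current
	 * link status, thus read twice. */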
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

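	/* Tx config: auto-pad runt frames, retry on collision, DMA bursts
	 * of up to 256 bytes, 512 byte fill threshold and the initial
	 * 64 byte drain threshold (raised later on Tx underruns). */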
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
			TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;

	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl);
}
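/* Periodic timer. In TP mode it verifies that the DSPCFG setting has
 * survived (the PHY can silently reset and lose it) and reinitializes the
 * chip if not; otherwise it just polls the link. It also retries refilling
 * the rx ring after an out-of-memory condition. */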
static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);

		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}

	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}

static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}

static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev);
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
		&np->ring_dma, GFP_KERNEL);
	if (!np->rx_ring)
		return -ENOMEM;
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
	return 0;
}

static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz + NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;
			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
							   skb->data, buflen,
							   DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}

static void set_bufsize(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
}

static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}

static void drain_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			dev->stats.tx_dropped++;
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void drain_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
		if (np->rx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
					 buflen + NATSEMI_PADDING,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
}

static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}

static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	dma_free_coherent(&np->pci_dev->dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}

static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}

static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

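	/* Calculate the next Tx descriptor entry.
	 * Note: Ordering is important here, set the field with the
	 * "ownership" bit last, and only then increment cur_tx. */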
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);

		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}

		writel(TxOn, ioaddr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
				dev->name, np->dirty_tx,
				le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else {
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);

		dev_consume_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
}
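/* The interrupt handler does little itself: reading IntrStatus
 * acknowledges the interrupt, after which further interrupts are disabled
 * and the NAPI poll routine is scheduled to do the real work. */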
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
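	/* Reading IntrStatus automatically acknowledges, so don't read it
	 * while interrupts are disabled, e.g. while a poll is scheduled. */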
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
			"%s: Interrupt, status %#08x, mask %#08x.\n",
			dev->name, np->intr_status,
			readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
			"%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
			dev->name, np->intr_status,
			readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
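/* This is the NAPI poll routine. As well as the standard RX handling it
 * also handles all other interrupts that the chip might raise. */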
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
				"%s: Poll, status %#08x, mask %#08x.\n",
				dev->name, np->intr_status,
				readl(ioaddr + IntrMask));

		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete_done(napi, work_done);

	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
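/* This routine is logically part of the interrupt handler, but separated
 * for clarity and better register allocation. */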
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem *ioaddr = ns_ioaddr(dev);

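	/* If the driver owns the next entry it's a new packet. Send it up. */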
	while (desc_status < 0) {
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				" netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status & (DescMore|DescPktOK|DescRxLong)) != DescPktOK) {
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				break;

			} else {
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
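			/* The packet is longer than the receive buffer:
			 * nothing sensible can be done with it, so it is
			 * silently dropped and the descriptor recycled. */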
		} else {
			struct sk_buff *skb;

			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				skb_reserve(skb, RX_OFFSET);
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->rx_dma[entry],
							buflen,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->rx_dma[entry],
							   buflen,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->rx_dma[entry],
						 buflen + NATSEMI_PADDING,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}

	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}

static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Fold the chip's two on-chip error counters into the stats. */
	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Only touch the hardware if the interface is up and we own it. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &dev->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with the IRQ masked. */
static void natsemi_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	intr_handler(irq, dev);
	enable_irq(irq);
}
#endif

/* Offset of the multicast hash table within the chip's receive filter
 * address space. */
#define HASH_TABLE	0x200
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64];
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) { /* accept everything */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The top nine bits of the address CRC select one
			 * of the 512 hash table bits. */
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		/* The filter RAM is written 16 bits at a time. */
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
}
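
/*
 * Worked example for the hash above (the CRC value is illustrative, not
 * taken from any real address): if ether_crc() returned 0x89abcdef, then
 * b = (0x89abcdef >> 23) & 0x1ff = 0x113 = 275, so bit 3 of mc_filter[34]
 * is set, and that byte reaches the chip in the 16-bit word written at
 * filter address HASH_TABLE + 34.
 */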

static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);
		/* stop engines */
		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		drain_rx(dev);
		/* change buffers */
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}
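
/*
 * ethtool support. Handlers that touch device state serialize on
 * np->lock; np->hands_off is not rechecked here because these entry
 * points run under the RTNL, which natsemi_suspend/resume (below)
 * also take.
 */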

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static int nway_reset(struct net_device *dev)
{
	int tmp;
	int r = -EINVAL;

	/* if autoneg is off, it's an error */
	tmp = mdio_read(dev, MII_BMCR);
	if (tmp & BMCR_ANENABLE) {
		tmp |= (BMCR_ANRESTART);
		mdio_write(dev, MII_BMCR, tmp);
		r = 0;
	}
	return r;
}

static u32 get_link(struct net_device *dev)
{
	/* LSTATUS is latched low until a read - so read twice */
	mdio_read(dev, MII_BMSR);
	return (mdio_read(dev, MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	/* read the whole eeprom, then copy the requested window out */
	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf+eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
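
/*
 * Wake-on-LAN support. The helpers below translate between the generic
 * ethtool WAKE_* flags and the chip's WOLCmd and receive-filter registers.
 */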

static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	/* translate to bitmasks this chip understands */
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}

static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
		| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

	/* translate from chip bitmasks */
	if (regval & WakePhy)
		*cur |= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure) {
		/* only reachable on revD and higher, see above */
		*cur |= WAKE_MAGICSECURE;
	}

	return 0;
}
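
/*
 * The SecureOn password for WakeMagicSecure is kept in three 16-bit words
 * of the receive-filter register file. The offsets used below (0xa, 0xc,
 * 0xe) are not part of the documented filter address map.
 */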

static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

	/* enable writing to the sopass registers by disabling the filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to the (undocumented) offsets 0xa, 0xc, 0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the rx filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}

static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three words back from offsets 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}
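
/* Fill in an ethtool_link_ksettings from the driver's software state,
 * resolving the actual speed and duplex from the autonegotiation result
 * when autoneg is enabled. Caller holds np->lock. */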

static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;
	u32 tmp;

	ecmd->base.port = dev->if_port;
	ecmd->base.speed = np->speed;
	ecmd->base.duplex = np->duplex;
	ecmd->base.autoneg = np->autoneg;
	advertising = 0;

	if (np->advertising & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->base.phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * for dhcpcd.
	 */

	switch (ecmd->base.port) {
	default:
	case PORT_TP:
		advertising |= ADVERTISED_TP;
		break;
	case PORT_MII:
		advertising |= ADVERTISED_MII;
		break;
	case PORT_FIBRE:
		advertising |= ADVERTISED_FIBRE;
		break;
	}

	/* if autoneg is on, try to find the active speed/duplex */
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->base.speed = SPEED_100;
		else
			ecmd->base.speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->base.duplex = DUPLEX_FULL;
		else
			ecmd->base.duplex = DUPLEX_HALF;
	}

	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
						advertising);

	return 0;
}
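
/* Apply new link settings: validate them, update the software state and
 * then reprogram the PHY. Caller holds np->lock. */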

static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						ecmd->link_modes.advertising);

	if (ecmd->base.port != PORT_TP &&
	    ecmd->base.port != PORT_MII &&
	    ecmd->base.port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		if ((advertising & (ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = ecmd->base.speed;
		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->base.duplex != DUPLEX_HALF &&
		    ecmd->base.duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
			       ecmd->base.port == PORT_TP))
		return -EINVAL;

	/*
	 * The transceiver follows the port: PORT_TP uses the internal
	 * transceiver, PORT_MII and PORT_FIBRE use an external phy on
	 * the mii bus, with PORT_FIBRE applying fibre specific setup.
	 */

	/* save the parms */
	dev->if_port = ecmd->base.port;
	np->autoneg = ecmd->base.autoneg;
	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed = ecmd->base.speed;
		np->duplex = ecmd->base.duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->base.port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}

static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}

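/*
 * SWAP_BITS reverses the bit order of a 16-bit word, e.g.
 * SWAP_BITS(0x0001) == 0x8000 and SWAP_BITS(0x1234) == 0x2c48.
 * It is used below to present the EEPROM contents to userland in the
 * same bit order in which they are physically stored.
 */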
#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )

static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sanely". So we swap it back here in order to
		 * present it to userland as it is stored. */
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phy_addr_external;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							  data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
						     data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
					   data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
				      data->reg_num & 0x1f,
				      data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
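
/* Prepare the chip so that an incoming wake-up packet can still be
 * received after the interface is (mostly) shut down. The rx process
 * must already be stopped when this is called. */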

static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}

static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

	/*
	 * Stop everything that could still poke the hardware: delete the
	 * timer, then disable the interrupt and set hands_off so that a
	 * concurrent handler backs off before the irq is freed.
	 */
	del_timer_sync(&np->timer);
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held:
	 * all async codepaths that access the chip are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}


static void natsemi_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev (dev);
	iounmap(ioaddr);
	free_netdev (dev);
}

/*
 * suspend/resume synchronization:
 *
 * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, the timer and
 * the interrupt handler all touch the hardware. With the exception of
 * the interrupt handler (which suspend fences with disable_irq()) and
 * the NAPI poll loop (which suspend fences with napi_disable()), every
 * path checks np->hands_off under np->lock before accessing the chip,
 * and natsemi_suspend sets hands_off under that same lock. The entry
 * points from user space additionally run under the RTNL, which
 * natsemi_suspend and natsemi_resume also take.
 */
static int __maybe_unused natsemi_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

		/* Update the error counts. */
		__get_stats(dev);

		/* the chip is stopped; release the rx buffers */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;

			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
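/* Undo natsemi_suspend: reinitialize the chip and rings, then reattach
 * the device. If the device is still attached, suspend never detached
 * it and there is nothing to redo. */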
static int __maybe_unused natsemi_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		BUG_ON(!np->hands_off);

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}

static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);

static struct pci_driver natsemi_driver = {
	.name		= DRV_NAME,
	.id_table	= natsemi_pci_tbl,
	.probe		= natsemi_probe1,
	.remove		= natsemi_remove1,
	.driver.pm	= &natsemi_pm_ops,
};

static int __init natsemi_init_mod(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}

static void __exit natsemi_exit_mod(void)
{
	pci_unregister_driver(&natsemi_driver);
}

module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);