/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 *
 * Copyright (C) 1997  Sten Wang
 * (C) Copyright 1997-1998 DAVICOM Semiconductor, Inc. All Rights Reserved.
 *
 * Additional updates: Ben Dooks, Sascha Hauer
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

/* Board/System/Debug information/definition --------------- */

#define DM9000_PHY		0x40	/* PHY address 0x01 */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/*
 * Debug messages level
 */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");

/* DM9000 register address locking.
 *
 * The chip is accessed through an address register followed by a data
 * register, so the address register must not be changed (for example by
 * an interrupt) between selecting the register index and doing the data
 * access.
 *
 * db->lock (a spinlock, held with IRQs disabled) protects each
 * address/data access pair, while db->addr_lock (a mutex) serialises the
 * longer multi-step sequences such as EEPROM and PHY accesses so that
 * they are not interleaved with each other.
 */

enum dm9000_type {
	TYPE_DM9000E,	/* original DM9000 */
	TYPE_DM9000A,
	TYPE_DM9000B
};

/* Structure/enum declaration ------------------------------- */
struct board_info {

	void __iomem	*io_addr;	/* Register I/O base address */
	void __iomem	*io_data;	/* Data I/O address */
	u16		irq;		/* IRQ */

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;	/* 0:word, 2:byte */
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_timeout:1;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;		/* parent device */

	struct resource	*addr_res;	/* resources found */
	struct resource	*data_res;
	struct resource	*addr_req;	/* resources requested */
	struct resource	*data_req;

	int		irq_wake;

	struct mutex	addr_lock;	/* phy and eeprom access lock */

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;
};

/* debug code */

#define dm9000_dbg(db, lev, msg...) do {	\
	if ((lev) < debug) {			\
		dev_dbg(db->dev, msg);		\
	}					\
} while (0)

static inline struct board_info *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

/* DM9000 network board routines ---------------------------- */

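/*
 * ior - read a byte from the chip: write the register index to the
 * address port, then read the value back from the data port.
 */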
static u8
ior(struct board_info *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

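/*
 * iow - write a byte to the chip: select the register via the address
 * port, then write the value to the data port.
 */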
static void
iow(struct board_info *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

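/*
 * dm9000_reset - hard-reset the chip. The reset is issued twice with a
 * short delay, since the chip does not always respond to the first reset
 * (see the error messages below); NCR bit 0 is polled to confirm.
 */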
static void
dm9000_reset(struct board_info *db)
{
	dev_dbg(db->dev, "resetting device\n");

	/* Reset the DM9000, with MAC loopback selected during the reset */
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100); /* delay 100us */
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}

/* routines for sending block to chip */

static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

/* input block from chip to memory */

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}

/* read a block from the chip and discard it */

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}

/*
 * Sleep, either by using msleep() or, if we are suspending or in a
 * transmit timeout (where we cannot sleep), by using mdelay().
 */
static void dm9000_msleep(struct board_info *db, unsigned int ms)
{
	if (db->in_suspend || db->in_timeout)
		mdelay(ms);
	else
		msleep(ms);
}

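/* Read a word from phyxcer */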
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Issue phyxcer read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait read complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */

	/* The read data keeps in REG_0D & REG_0E */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

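/* Write a word to phyxcer */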
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	if (!db->in_timeout)
		mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	/* Fill the phyxcer register into REG_0C */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Fill the written data into REG_0D & REG_0E */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* Issue phyxcer write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* Wait write complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */

	/* restore the previous address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	if (!db->in_timeout)
		mutex_unlock(&db->addr_lock);
}

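/*
 * dm9000_set_io - select the 8/16/32-bit block I/O routines that match
 * the width of the data bus the chip is wired to.
 */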
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	/* use the size of the data resource to work out which IO
	 * routines we want to use
	 */

	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(struct board_info *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(struct board_info *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(struct board_info *db)
{
	unsigned int status;
	int timeout = 8;

	/* The DM9000 data sheets say we should be able to
	 * poll the ERRE bit in EPCR to wait for the EEPROM
	 * operation. From testing several chips, this bit
	 * does not seem to work.
	 *
	 * We attempt to use the bit, but fall back to the
	 * timeout (which is why we do not return an error
	 * on expiry) to say that the EEPROM operation has
	 * completed.
	 */
	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

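/*
 * Read a word of data from the EEPROM.
 */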
static void
dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* delay for at-least 150uS */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

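/*
 * Write a word of data to the EEPROM.
 */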
static void
dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

/* ethtool ops */

static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct board_info *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	struct board_info *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	mii_ethtool_get_link_ksettings(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct board_info *dm = to_dm9000_board(dev);

	return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct board_info *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	struct board_info *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is aligned to two bytes */
	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct board_info *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	/* EEPROM access is aligned to two bytes */
	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* note, we could probably support wake-phy too */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct board_info *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* change in wol state, update IRQ state */
		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
	.get_link_ksettings	= dm9000_get_link_ksettings,
	.set_link_ksettings	= dm9000_set_link_ksettings,
};

static void dm9000_show_carrier(struct board_info *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}

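/*
 * Periodic work to check the media/link state: boards flagged as
 * "simple PHY" (internal PHY only) poll the NSR link bit directly,
 * otherwise the generic MII helpers are used.
 */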
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct board_info *db = container_of(dw, struct board_info, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

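/*
 * dm9000_release_board - release board resources: unmap the register and
 * data windows and release/free the memory regions claimed during probe.
 */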
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	/* unmap our resources */
	iounmap(db->io_addr);
	iounmap(db->io_data);

	/* release the resources */
	if (db->data_req)
		release_resource(db->data_req);
	kfree(db->data_req);

	if (db->addr_req)
		release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

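/*
 * Set DM9000 multicast address: program the unicast address, build the
 * 64-bit multicast hash table and write the receive control register.
 */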
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 };	/* broadcast address */
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* the multicast address in Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

static void
dm9000_mask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, IMR_PAR);
}

static void
dm9000_unmask_interrupts(struct board_info *db)
{
	iow(db, DM9000_IMR, db->imr_all);
}

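/*
 * Initialize dm9000 board
 */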
static void
dm9000_init_dm9000(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	dm9000_reset(db);
	dm9000_mask_interrupts(db);

	/* I/O mode */
	db->io_mode = ior(db, DM9000_ISR) >> 6;	/* ISR bit7:6 keeps I/O mode */

	/* Checksum mode */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
	iow(db, DM9000_GPR, 0);

	/* If we are dealing with DM9000B, some extra steps are required:
	 * a manual PHY reset, and setting init params.
	 */
	if (db->type == TYPE_DM9000B) {
		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
	}

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* if wake-on-LAN is needed, always set NCR_WAKEEN, otherwise we end
	 * up powering the chip down when we don't want to
	 */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* Program operating register */
	iow(db, DM9000_TCR, 0);		/* TX Polling clear */
	iow(db, DM9000_BPTR, 0x3f);	/* Less 3Kb, 200us */
	iow(db, DM9000_FCR, 0xff);	/* Flow Control */
	iow(db, DM9000_SMCR, 0);	/* Special Mode */
	/* clear TX status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);	/* Clear interrupt status */

	/* Set address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* Init driver variable */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	netif_trans_update(dev);
}

/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);
	db->in_timeout = 1;
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_init_dm9000(dev);
	dm9000_unmask_interrupts(db);
	/* We can accept TX packets again */
	netif_trans_update(dev);	/* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	db->in_timeout = 0;
	spin_unlock_irqrestore(&db->lock, flags);
}
999
1000static void dm9000_send_packet(struct net_device *dev,
1001 int ip_summed,
1002 u16 pkt_len)
1003{
1004 struct board_info *dm = to_dm9000_board(dev);
1005
1006
1007 if (dm->ip_summed != ip_summed) {
1008 if (ip_summed == CHECKSUM_NONE)
1009 iow(dm, DM9000_TCCR, 0);
1010 else
1011 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
1012 dm->ip_summed = ip_summed;
1013 }
1014
1015
1016 iow(dm, DM9000_TXPLL, pkt_len);
1017 iow(dm, DM9000_TXPLH, pkt_len >> 8);
1018
1019
1020 iow(dm, DM9000_TCR, TCR_TXREQ);
1021}
1022
1023
1024
1025
1026
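/*
 * Hardware start transmission.
 * Send a packet to media from the upper layer.
 */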
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	struct board_info *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* Move data to DM9000 TX RAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* TX control: first packet is sent immediately, second is queued */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* Second packet */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/*
 * DM9000 interrupt handler, transmit side:
 * free the transmitted packet and send any queued packet.
 */
static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* Got TX status */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* One packet sent complete */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* Queue packet check & send */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

1090struct dm9000_rxhdr {
1091 u8 RxPktReady;
1092 u8 RxStatus;
1093 __le16 RxLen;
1094} __packed;
1095
1096
1097
1098
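/*
 * Received a packet and pass to upper layer
 */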
static void
dm9000_rx(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* Check packet ready or not */
	do {
		ior(db, DM9000_MRCMDX);	/* Dummy read */

		/* Get most updated data */
		rxbyte = readb(db->io_data);

		/* Status check: this byte must be 0 or 1 */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);	/* Stop Device */
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* A packet ready now & Get status/length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* Packet Status check */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX) {
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
		}

		/* rxhdr.RxStatus is identical to the RSR register. */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = skb_put(skb, RxLen - 4);

			/* Read received packet from RX SRAM */
			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* need to dump the packet's data */
			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* A real interrupt coming; holders of db->lock must always block
	 * IRQs, so taking it here is safe from interrupt context.
	 */
	spin_lock_irqsave(&db->lock, flags);

	/* Save previous register address */
	reg_save = readb(db->io_addr);

	dm9000_mask_interrupts(db);
	/* Got DM9000 interrupt status */
	int_status = ior(db, DM9000_ISR);	/* Got ISR */
	iow(db, DM9000_ISR, int_status);	/* Clear ISR status */

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* Received the coming packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* Transmit Interrupt check */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change request */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	dm9000_unmask_interrupts(db);
	/* Restore previous register address */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct board_info *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear, so we can avoid */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

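/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */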
static int
dm9000_open(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);
	unsigned int irq_flags = irq_get_trigger_type(dev->irq);

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* If there is no IRQ type specified, tell the user that this is a
	 * problem
	 */
	if (irq_flags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irq_flags |= IRQF_SHARED;

	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
	mdelay(1); /* delay needed by DM9000B */

	/* Initialize DM9000 board */
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
		return -EAGAIN;
	/* Now that we have an interrupt handler hooked up we can unmask
	 * our interrupts
	 */
	dm9000_unmask_interrupts(db);

	/* Init driver variable */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	/* Poll initial link status */
	schedule_delayed_work(&db->phy_poll, 1);

	return 0;
}

static void
dm9000_shutdown(struct net_device *dev)
{
	struct board_info *db = netdev_priv(dev);

	/* RESET device */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);	/* PHY RESET */
	iow(db, DM9000_GPR, 0x01);	/* Power-Down PHY */
	dm9000_mask_interrupts(db);
	iow(db, DM9000_RCR, 0x00);	/* Disable RX */
}

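/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */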
static int
dm9000_stop(struct net_device *ndev)
{
	struct board_info *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	const void *mac_addr;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return ERR_PTR(-ENXIO);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));

	return pdata;
}

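/*
 * Search DM9000 board, allocate space and register it
 */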
static int
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
	struct board_info *db;	/* Point a board information structure */
	struct net_device *ndev;
	struct device *dev = &pdev->dev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;
	int reset_gpios;
	enum of_gpio_flags flags;
	struct regulator *power;
	bool inv_mac_addr = false;

	power = devm_regulator_get(dev, "vcc");
	if (IS_ERR(power)) {
		if (PTR_ERR(power) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "no regulator provided\n");
	} else {
		ret = regulator_enable(power);
		if (ret != 0) {
			dev_err(dev,
				"Failed to enable power regulator: %d\n", ret);
			return ret;
		}
		dev_dbg(dev, "regulator enabled\n");
	}

	reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
					      &flags);
	if (gpio_is_valid(reset_gpios)) {
		ret = devm_gpio_request_one(dev, reset_gpios, flags,
					    "dm9000_reset");
		if (ret) {
			dev_err(dev, "failed to request reset gpio %d: %d\n",
				reset_gpios, ret);
			return -ENODEV;
		}

		/* According to manual PWRST# Low Period Min 1ms */
		msleep(2);
		gpio_set_value(reset_gpios, 1);
		/* Needs 3ms to read eeprom when PWRST is deasserted */
		msleep(4);
	}

	if (!pdata) {
		pdata = dm9000_parse_dt(&pdev->dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* setup board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!db->addr_res || !db->data_res) {
		dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
			db->addr_res, db->data_res);
		ret = -ENOENT;
		goto out;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		dev_err(db->dev, "interrupt resource unavailable: %d\n",
			ndev->irq);
		ret = ndev->irq;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {

			/* test to see if irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->io_addr;

	/* ensure at least we have a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check to see if anything is being over-ridden */
	if (pdata != NULL) {
		/* check to see if the driver wants to over-ride the
		 * default IO width
		 */
		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* check to see if there are any IO routine
		 * over-rides
		 */
		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	/* try multiple times, DM9000 sometimes gets the read wrong */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* Identify which revision of the DM9000 we are working on */
	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* dm9000a/b are capable of hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}

	/* from this point we assume that we have found a DM9000 */

	/* driver system function */
	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the node address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */
		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		inv_mac_addr = true;
		eth_hw_addr_random(ndev);
		mac_src = "random";
	}

	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		if (inv_mac_addr)
			dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
				 ndev->name);
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	}
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct board_info *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shutdown if not using WoL */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct board_info *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			/* reset if we were not in wake mode to ensure if
			 * the device was powered off it is in a known state
			 */
			if (!db->wake_state) {
				dm9000_init_dm9000(ndev);
				dm9000_unmask_interrupts(db);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);		/* free device structure */

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};

module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");