1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/module.h>
23#include <linux/ioport.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/of.h>
33#include <linux/of_net.h>
34#include <linux/ethtool.h>
35#include <linux/dm9000.h>
36#include <linux/delay.h>
37#include <linux/platform_device.h>
38#include <linux/irq.h>
39#include <linux/slab.h>
40
41#include <asm/delay.h>
42#include <asm/irq.h>
43#include <asm/io.h>
44
45#include "dm9000.h"
46
47
48
/* PHY address bit set in EPAR to select internal-PHY register access */
#define DM9000_PHY 0x40

#define CARDNAME "dm9000"
#define DRV_VERSION "1.31"

/* Transmit timeout in milliseconds (module parameter, read-only). */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/* Debug verbosity; dm9000_dbg() emits messages whose level is below this. */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/* Chip variants, detected from the CHIPR revision register at probe time. */
enum dm9000_type {
	TYPE_DM9000E,	/* original part: link state is polled (no link IRQ used) */
	TYPE_DM9000A,
	TYPE_DM9000B
};
95
96
/* Per-device driver state, stored in netdev_priv() of the net_device. */
typedef struct board_info {

	void __iomem *io_addr;	/* register-index (address) port */
	void __iomem *io_data;	/* data port */
	u16 irq;		/* IRQ number */

	u16 tx_pkt_cnt;		/* packets currently in the chip's TX buffer */
	u16 queue_pkt_len;	/* length of the deferred second TX packet */
	u16 queue_start_addr;
	u16 queue_ip_summed;	/* ip_summed of the deferred TX packet */
	u16 dbug_cnt;
	u8 io_mode;		/* bus width mode read back from ISR at init */
	u8 phy_addr;
	u8 imr_all;		/* IMR value with all wanted sources enabled */

	unsigned int flags;		/* DM9000_PLATF_* platform flags */
	unsigned int in_suspend :1;	/* set: must busy-wait, not sleep */
	unsigned int wake_supported :1;	/* wake IRQ configured successfully */

	enum dm9000_type type;

	/* bus-width-specific FIFO accessors, selected by dm9000_set_io() */
	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device *dev;	/* parent platform device */

	struct resource *addr_res;	/* address-port memory resource */
	struct resource *data_res;	/* data-port memory resource */
	struct resource *addr_req;	/* claimed region for addr_res */
	struct resource *data_req;	/* claimed region for data_res */
	struct resource *irq_res;

	int irq_wake;		/* optional second IRQ used for wake-on-LAN */

	struct mutex addr_lock;	/* serialises EEPROM/PHY multi-step sequences */

	struct delayed_work phy_poll;	/* periodic link-state poll work */
	struct net_device *ndev;

	spinlock_t lock;	/* protects the shared index/data port pair */

	struct mii_if_info mii;
	u32 msg_enable;		/* netif_msg_* verbosity mask */
	u32 wake_state;		/* currently configured WAKE_* options */

	int ip_summed;		/* last checksum mode programmed into TCCR */
} board_info_t;
145
146
147
/* Emit a debug message when its level is below the module "debug" level. */
#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < debug) {				\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)
153
/* The board_info lives in the net_device's private area. */
static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}
158
159
160
161
162
163
/*
 * Read one register: latch the register index on the address port,
 * then read the value from the data port.  The two-step access means
 * callers must hold db->lock (or otherwise own the index register).
 */
static u8
ior(board_info_t * db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}
170
171
172
173
174
/*
 * Write one register: latch the index on the address port, then write
 * the value to the data port.  Same locking contract as ior().
 */
static void
iow(board_info_t * db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}
181
/*
 * Reset the chip.  NCR is written with 0x03 (reset plus loopback
 * select) and polled after 100us; NCR bit 0 self-clears once reset
 * completes.  The sequence is issued twice — presumably a workaround
 * for parts that ignore the first reset (TODO: confirm against the
 * Davicom application notes).
 */
static void
dm9000_reset(board_info_t *db)
{
	dev_dbg(db->dev, "resetting device\n");

	iow(db, DM9000_NCR, 0x03);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to first reset\n");

	iow(db, DM9000_NCR, 0);
	iow(db, DM9000_NCR, 0x03);
	udelay(100);
	if (ior(db, DM9000_NCR) & 1)
		dev_err(db->dev, "dm9000 did not respond to second reset\n");
}
202
203
204
/* Write 'count' bytes to the data port, one byte at a time. */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}
209
/*
 * Write 'count' bytes as 16-bit words, rounding up; for odd counts one
 * padding byte beyond 'data' is read, so buffers must tolerate that.
 */
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}
214
/*
 * Write 'count' bytes as 32-bit words, rounding up; up to 3 padding
 * bytes beyond 'data' may be read for non-multiple-of-4 counts.
 */
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}
219
220
221
/* Read 'count' bytes from the data port, one byte at a time. */
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}
226
227
/*
 * Read 'count' bytes as 16-bit words, rounding up; for odd counts one
 * extra byte is stored past 'data', so buffers must be padded.
 */
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}
232
/*
 * Read 'count' bytes as 32-bit words, rounding up; up to 3 extra bytes
 * may be stored past 'data', so buffers must be padded.
 */
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}
237
238
239
240static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
241{
242 int i;
243 int tmp;
244
245 for (i = 0; i < count; i++)
246 tmp = readb(reg);
247}
248
249static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
250{
251 int i;
252 int tmp;
253
254 count = (count + 1) >> 1;
255
256 for (i = 0; i < count; i++)
257 tmp = readw(reg);
258}
259
260static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
261{
262 int i;
263 int tmp;
264
265 count = (count + 3) >> 2;
266
267 for (i = 0; i < count; i++)
268 tmp = readl(reg);
269}
270
271
272
273
274
/*
 * Sleep for 'ms' milliseconds.  During suspend the scheduler may not
 * be available, so busy-wait with mdelay() instead of msleep().
 */
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend)
		mdelay(ms);
	else
		msleep(ms);
}
282
283
/*
 * Read a register from the (internal) PHY over the chip's MII bridge.
 * addr_lock serialises the whole multi-step EPCR transaction; db->lock
 * is only held around register accesses and dropped across the 1ms
 * wait so the interrupt handler can still run.  The current index-port
 * value is saved and restored around each locked section because the
 * IRQ path shares the index/data register pair.
 */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Save current index-port selection for the IRQ path. */
	reg_save = readb(db->io_addr);

	/* Select PHY register 'reg'. */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* Kick off a PHY read. */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	/* Give the bridge time to complete (busy-waits in suspend). */
	dm9000_msleep(db, 1);

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	/* Clear the command register. */
	iow(db, DM9000_EPCR, 0x0);

	/* Collect the 16-bit result from the data-register pair. */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}
327
328
329static void
330dm9000_phy_write(struct net_device *dev,
331 int phyaddr_unused, int reg, int value)
332{
333 board_info_t *db = netdev_priv(dev);
334 unsigned long flags;
335 unsigned long reg_save;
336
337 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
338 mutex_lock(&db->addr_lock);
339
340 spin_lock_irqsave(&db->lock, flags);
341
342
343 reg_save = readb(db->io_addr);
344
345
346 iow(db, DM9000_EPAR, DM9000_PHY | reg);
347
348
349 iow(db, DM9000_EPDRL, value);
350 iow(db, DM9000_EPDRH, value >> 8);
351
352
353 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
354
355 writeb(reg_save, db->io_addr);
356 spin_unlock_irqrestore(&db->lock, flags);
357
358 dm9000_msleep(db, 1);
359
360 spin_lock_irqsave(&db->lock, flags);
361 reg_save = readb(db->io_addr);
362
363 iow(db, DM9000_EPCR, 0x0);
364
365
366 writeb(reg_save, db->io_addr);
367
368 spin_unlock_irqrestore(&db->lock, flags);
369 mutex_unlock(&db->addr_lock);
370}
371
372
373
374
375
376
377
/*
 * Select the FIFO block-transfer routines for the given bus width
 * (in bytes).  Unknown widths default to 32-bit.
 */
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		/* no 24-bit accessors exist; use the 16-bit ones */
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fallthrough */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}
408
/*
 * Re-arm the link-state poll, but only for the DM9000E which (in this
 * driver) has no link-change interrupt handling; other variants use
 * the ISR_LNKCHNG interrupt path instead.
 */
static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}
414
415static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
416{
417 board_info_t *dm = to_dm9000_board(dev);
418
419 if (!netif_running(dev))
420 return -EINVAL;
421
422 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
423}
424
/* Read a single register with db->lock held, for unlocked callers. */
static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}
437
438static int dm9000_wait_eeprom(board_info_t *db)
439{
440 unsigned int status;
441 int timeout = 8;
442
443
444
445
446
447
448
449
450
451
452
453
454 while (1) {
455 status = dm9000_read_locked(db, DM9000_EPCR);
456
457 if ((status & EPCR_ERRE) == 0)
458 break;
459
460 msleep(1);
461
462 if (timeout-- < 0) {
463 dev_dbg(db->dev, "timeout waiting EEPROM\n");
464 break;
465 }
466 }
467
468 return 0;
469}
470
471
472
473
/*
 * Read one 16-bit word from the EEPROM at word offset 'offset' into
 * to[0] (low byte) and to[1] (high byte).  Boards flagged with
 * DM9000_PLATF_NO_EEPROM get 0xffff.  addr_lock serialises the whole
 * transaction; db->lock is dropped while waiting for completion.
 */
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* Select the word and start the EEPROM read. */
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	/* extra settle time after the busy bit clears */
	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	/* Clear the command and collect the result bytes. */
	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
510
511
512
513
/*
 * Write one 16-bit word (data[0] low, data[1] high) to the EEPROM at
 * word offset 'offset'.  No-op when DM9000_PLATF_NO_EEPROM is set.
 * Same locking pattern as dm9000_read_eeprom(); note the post-wait
 * delay here uses mdelay() (busy-wait) rather than msleep().
 */
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	/* WEP enables writes; ERPRW starts the write cycle. */
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}
541
542
543
/* ethtool: report driver name, version and owning platform device. */
static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}
554
/* ethtool: return the current netif_msg_* verbosity mask. */
static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}
561
/* ethtool: set the netif_msg_* verbosity mask. */
static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}
568
/* ethtool: report link settings via the generic MII helpers. */
static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}
576
/* ethtool: apply link settings via the generic MII helpers. */
static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}
583
/* ethtool: restart PHY autonegotiation. */
static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}
589
590static int dm9000_set_features(struct net_device *dev,
591 netdev_features_t features)
592{
593 board_info_t *dm = to_dm9000_board(dev);
594 netdev_features_t changed = dev->features ^ features;
595 unsigned long flags;
596
597 if (!(changed & NETIF_F_RXCSUM))
598 return 0;
599
600 spin_lock_irqsave(&dm->lock, flags);
601 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
602 spin_unlock_irqrestore(&dm->lock, flags);
603
604 return 0;
605}
606
607static u32 dm9000_get_link(struct net_device *dev)
608{
609 board_info_t *dm = to_dm9000_board(dev);
610 u32 ret;
611
612 if (dm->flags & DM9000_PLATF_EXT_PHY)
613 ret = mii_link_ok(&dm->mii);
614 else
615 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
616
617 return ret;
618}
619
620#define DM_EEPROM_MAGIC (0x444D394B)
621
/* ethtool: EEPROM size in bytes (64 16-bit words). */
static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}
626
/*
 * ethtool: read EEPROM contents.  The EEPROM is word-addressed, so
 * both offset and length must be even; each iteration fetches one
 * 16-bit word into two bytes of 'data'.
 */
static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}
650
/*
 * ethtool: write EEPROM contents.  The device writes whole 16-bit
 * words, so an odd offset or a trailing odd byte is handled by a
 * read-modify-write of the containing word (one byte consumed per
 * iteration); aligned spans are written a full word at a time.
 */
static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			/* unaligned byte: merge into the existing word */
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}
689
/* ethtool: report wake-on-LAN capability (magic packet only). */
static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* only supported when a dedicated wake IRQ was set up at probe */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}
700
/*
 * ethtool: configure wake-on-LAN.  Only WAKE_MAGIC is accepted.
 * Programs the chip's WCR register, then arms or disarms the wake IRQ
 * when the requested state transitions from/to "no wake sources".
 */
static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* arm on first enable, disarm when all sources are cleared */
		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}
737
/* ethtool operations table. */
static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};
752
/*
 * Log a carrier transition: speed (from NSR), duplex (from NCR) and
 * the link partner's advertisement on link-up, or a plain message on
 * link-down.  'nsr' is the NSR value the caller already sampled.
 */
static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	int lpa;
	struct net_device *ndev = db->ndev;
	struct mii_if_info *mii = &db->mii;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier) {
		lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
		dev_info(db->dev,
			 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half", lpa);
	} else {
		dev_info(db->dev, "%s: link down\n", ndev->name);
	}
}
771
/*
 * Periodic link poll.  Boards flagged SIMPLE_PHY (and not using an
 * external PHY) read link state straight from NSR_LINKST and update
 * the carrier themselves; everything else goes through the generic
 * MII media check.  Re-arms itself while the interface is running.
 */
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}
802
803
804
805
806
807
/*
 * Undo the resource setup done in probe: unmap both register windows
 * and release the claimed memory regions.  addr_req/data_req were
 * allocated by request_mem_region(), so release_resource() + kfree()
 * is the manual equivalent of release_mem_region().
 */
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	iounmap(db->io_addr);
	iounmap(db->io_data);

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}
824
825static unsigned char dm9000_type_to_char(enum dm9000_type type)
826{
827 switch (type) {
828 case TYPE_DM9000E: return 'e';
829 case TYPE_DM9000A: return 'a';
830 case TYPE_DM9000B: return 'b';
831 }
832
833 return '?';
834}
835
836
837
838
/*
 * Program the MAC address, the 64-bit multicast hash filter and the
 * RX control register.  Caller must hold db->lock.  hash_table[3] is
 * pre-set to 0x8000 so filter slot 63 is always on (the broadcast
 * address hashes there).
 */
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4] = { 0, 0, 0, 0x8000 };
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	/* station address into the PAR registers */
	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	/* set one bit per multicast address: low 6 bits of the CRC */
	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* write the hash table into the MAR registers, low byte first */
	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}
874
/* ndo_set_rx_mode: locked wrapper around dm9000_hash_table_unlocked(). */
static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}
885
886
887
888
/*
 * Program the chip into a known running state: checksum offload, GPIO
 * (PHY power), PHY setup for DM9000B, NCR, TX/RX control, stale
 * status/interrupt acknowledgement, address filters and finally the
 * interrupt mask.  Also resets the driver's TX bookkeeping.
 */
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	/* I/O mode is reported by the chip in the top bits of ISR */
	db->io_mode = ior(db, DM9000_ISR) >> 6;

	/* program RX checksum offload to match the current features */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	/* GPIO0 as output, driven low: powers up the internal PHY */
	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);
	iow(db, DM9000_GPR, 0);

	/* DM9000B needs a PHY reset plus DSP parameter fixup.
	 * NOTE(review): dm9000_phy_write() takes a mutex and sleeps, so
	 * this path must not run in atomic context — see dm9000_timeout.
	 */
	if (db->type == TYPE_DM9000B) {
		dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
		dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
	}

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* keep wake-event generation enabled if WoL is available */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	/* TX control, back-pressure threshold, flow control; no special mode */
	iow(db, DM9000_TCR, 0);
	iow(db, DM9000_BPTR, 0x3f);
	iow(db, DM9000_FCR, 0xff);
	iow(db, DM9000_SMCR, 0);

	/* acknowledge any stale wake/TX status and interrupt bits */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);

	/* MAC address, multicast filter and RX enable */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	iow(db, DM9000_IMR, imr);

	/* reset driver-side TX state */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	dev->trans_start = jiffies;
}
953
954
/*
 * ndo_tx_timeout: recover from a stuck transmitter by resetting and
 * re-initialising the chip, then restarting the queue.
 *
 * NOTE(review): dm9000_init_dm9000() can reach dm9000_phy_write()
 * (for DM9000B), which takes a mutex and sleeps — but we are inside
 * spin_lock_irqsave() here.  Looks like a sleep-in-atomic hazard on
 * DM9000B; confirm and consider deferring the reset to a work item.
 */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* save the index-port selection and take the register lock */
	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	/* restart the watchdog clock and the queue */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	/* restore the index port for whoever we interrupted */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}
976
/*
 * Start transmission of a packet already copied into the chip's TX
 * SRAM: program the checksum-generation mode (only when it changed
 * since the last packet), the packet length, and issue TXREQ.
 * Called with db->lock held (from start_xmit and the IRQ TX path).
 */
static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* only touch TCCR when the checksum mode actually changes */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* packet length, low byte then high byte */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* fire */
	iow(dm, DM9000_TCR, TCR_TXREQ);
}
999
1000
1001
1002
1003
/*
 * ndo_start_xmit.  The chip buffers up to two packets: the first is
 * copied to TX SRAM and sent immediately; a second is copied, queued
 * (length/checksum remembered for the TX-done handler) and the queue
 * is stopped until a completion frees a slot.  The skb is always
 * consumed once the data has been copied out.
 */
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	/* both hardware TX slots in use */
	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* MWCMD: write to TX SRAM with auto-incrementing pointer */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;

	if (db->tx_pkt_cnt == 1) {
		/* first packet: transmit right away */
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		/* second packet: defer until the first completes */
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* data has been copied to the chip; the skb is ours to free */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
1041
1042
1043
1044
1045
1046
/*
 * TX-completion handler, called from the interrupt handler with
 * db->lock held: account one finished packet, start the queued second
 * packet if any, and wake the queue.
 *
 * NOTE(review): only one completion is counted per call even when
 * both NSR_TX1END and NSR_TX2END are set — confirm whether both slots
 * can complete between interrupts and, if so, whether tx_pkt_cnt can
 * drift.
 */
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);	/* reading NSR clears the end bits */

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* one packet left the chip */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* kick the deferred second packet, if one is waiting */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}
1066
/* 4-byte header the chip prepends to each received frame in RX SRAM. */
struct dm9000_rxhdr {
	u8	RxPktReady;	/* packet-ready marker byte */
	u8	RxStatus;	/* RSR_* status bits for this frame */
	__le16	RxLen;		/* frame length, little-endian */
} __packed;
1072
1073
1074
1075
/*
 * Drain received packets from the chip's RX SRAM.  Each frame is
 * preceded by a dm9000_rxhdr; the ready byte is peeked with MRCMDX
 * (which does not advance the read pointer) before committing to a
 * full MRCMD read.  Bad frames are consumed via dumpblk so the read
 * pointer stays in sync.  Called from the interrupt handler with
 * db->lock held.
 */
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	do {
		/* MRCMDX peeks the ready byte without moving the pointer */
		ior(db, DM9000_MRCMDX);

		rxbyte = readb(db->io_data);

		/* ready byte must be 0 or 1; anything else means the
		 * pointer is out of sync — stop RX and bail out */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);
			iow(db, DM9000_ISR, IMR_PAR);
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* a packet is waiting: read its header with MRCMD
		 * (auto-incrementing read) */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		/* runt frame (shorter than minimum Ethernet frame) */
		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX) {
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
		}

		/* per-frame error bits from the chip */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* good frame and skb available: copy it out.  RxLen - 4
		 * drops the trailing CRC; inblk still reads the full
		 * RxLen so the SRAM pointer advances past the CRC. */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);	/* align IP header */
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* bad frame or no memory: consume it anyway to
			 * keep the read pointer in sync */
			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}
1178
/*
 * Main interrupt handler: mask chip interrupts, acknowledge and latch
 * the status, service RX then TX completions, schedule the link poll
 * on a link-change event, then unmask.  The index-port value is saved
 * and restored because the handler shares the index/data pair with
 * whatever code it interrupted.
 */
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	spin_lock_irqsave(&db->lock, flags);

	/* save the interrupted context's index-port selection */
	reg_save = readb(db->io_addr);

	/* mask everything (IMR_PAR keeps the SRAM pointer auto-wrap) */
	iow(db, DM9000_IMR, IMR_PAR);

	/* read and acknowledge the pending status bits */
	int_status = ior(db, DM9000_ISR);
	iow(db, DM9000_ISR, int_status);

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* received packet(s) */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* transmit complete */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* defer carrier update to process context */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	/* re-enable the full interrupt mask */
	iow(db, DM9000_IMR, db->imr_all);

	/* restore the index port for the interrupted context */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}
1232
/*
 * Wake-on-LAN interrupt handler (the optional second IRQ).  Checks
 * NSR for a wake event, acknowledges it, and logs which WCR source
 * fired.  Returns IRQ_NONE when no wake event was pending, since the
 * line is shared.
 */
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* acknowledge the wake event */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST )
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);

	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}
1267
1268#ifdef CONFIG_NET_POLL_CONTROLLER
1269
1270
1271
/* netpoll: run the interrupt handler with the IRQ line disabled. */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
1278#endif
1279
1280
1281
1282
1283
1284static int
1285dm9000_open(struct net_device *dev)
1286{
1287 board_info_t *db = netdev_priv(dev);
1288 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1289
1290 if (netif_msg_ifup(db))
1291 dev_dbg(db->dev, "enabling %s\n", dev->name);
1292
1293
1294
1295
1296 if (irqflags == IRQF_TRIGGER_NONE)
1297 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1298
1299 irqflags |= IRQF_SHARED;
1300
1301
1302 iow(db, DM9000_GPR, 0);
1303 mdelay(1);
1304
1305
1306 dm9000_reset(db);
1307 dm9000_init_dm9000(dev);
1308
1309 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1310 return -EAGAIN;
1311
1312
1313 db->dbug_cnt = 0;
1314
1315 mii_check_media(&db->mii, netif_msg_link(db), 1);
1316 netif_start_queue(dev);
1317
1318 dm9000_schedule_poll(db);
1319
1320 return 0;
1321}
1322
/*
 * Quiesce the hardware: reset the PHY, power it down (GPR bit 0),
 * mask all interrupts and disable the receiver.
 */
static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
	iow(db, DM9000_GPR, 0x01);	/* PHY power down */
	iow(db, DM9000_IMR, IMR_PAR);	/* mask all interrupt sources */
	iow(db, DM9000_RCR, 0x00);	/* RX off */
}
1334
1335
1336
1337
1338
/*
 * ndo_stop: cancel the link poll, stop the queue, drop the carrier,
 * free the interrupt and power the hardware down.
 */
static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}
1359
/* net_device operations table. */
static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};
1375
/*
 * Build platform data from device-tree properties.  Returns NULL when
 * there is no OF node (caller falls back to legacy platform data) or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
{
	struct dm9000_plat_data *pdata;
	struct device_node *np = dev->of_node;
	const void *mac_addr;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return NULL;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	if (of_find_property(np, "davicom,ext-phy", NULL))
		pdata->flags |= DM9000_PLATF_EXT_PHY;
	if (of_find_property(np, "davicom,no-eeprom", NULL))
		pdata->flags |= DM9000_PLATF_NO_EEPROM;

	/* optional MAC address from the "mac-address"/"local-mac-address"
	 * properties */
	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));

	return pdata;
}
1400
1401
1402
1403
1404static int
1405dm9000_probe(struct platform_device *pdev)
1406{
1407 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1408 struct board_info *db;
1409 struct net_device *ndev;
1410 const unsigned char *mac_src;
1411 int ret = 0;
1412 int iosize;
1413 int i;
1414 u32 id_val;
1415
1416 if (!pdata) {
1417 pdata = dm9000_parse_dt(&pdev->dev);
1418 if (IS_ERR(pdata))
1419 return PTR_ERR(pdata);
1420 }
1421
1422
1423 ndev = alloc_etherdev(sizeof(struct board_info));
1424 if (!ndev)
1425 return -ENOMEM;
1426
1427 SET_NETDEV_DEV(ndev, &pdev->dev);
1428
1429 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1430
1431
1432 db = netdev_priv(ndev);
1433
1434 db->dev = &pdev->dev;
1435 db->ndev = ndev;
1436
1437 spin_lock_init(&db->lock);
1438 mutex_init(&db->addr_lock);
1439
1440 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1441
1442 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1443 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1444 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1445
1446 if (db->addr_res == NULL || db->data_res == NULL ||
1447 db->irq_res == NULL) {
1448 dev_err(db->dev, "insufficient resources\n");
1449 ret = -ENOENT;
1450 goto out;
1451 }
1452
1453 db->irq_wake = platform_get_irq(pdev, 1);
1454 if (db->irq_wake >= 0) {
1455 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1456
1457 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1458 IRQF_SHARED, dev_name(db->dev), ndev);
1459 if (ret) {
1460 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1461 } else {
1462
1463
1464 ret = irq_set_irq_wake(db->irq_wake, 1);
1465 if (ret) {
1466 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1467 db->irq_wake, ret);
1468 ret = 0;
1469 } else {
1470 irq_set_irq_wake(db->irq_wake, 0);
1471 db->wake_supported = 1;
1472 }
1473 }
1474 }
1475
1476 iosize = resource_size(db->addr_res);
1477 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1478 pdev->name);
1479
1480 if (db->addr_req == NULL) {
1481 dev_err(db->dev, "cannot claim address reg area\n");
1482 ret = -EIO;
1483 goto out;
1484 }
1485
1486 db->io_addr = ioremap(db->addr_res->start, iosize);
1487
1488 if (db->io_addr == NULL) {
1489 dev_err(db->dev, "failed to ioremap address reg\n");
1490 ret = -EINVAL;
1491 goto out;
1492 }
1493
1494 iosize = resource_size(db->data_res);
1495 db->data_req = request_mem_region(db->data_res->start, iosize,
1496 pdev->name);
1497
1498 if (db->data_req == NULL) {
1499 dev_err(db->dev, "cannot claim data reg area\n");
1500 ret = -EIO;
1501 goto out;
1502 }
1503
1504 db->io_data = ioremap(db->data_res->start, iosize);
1505
1506 if (db->io_data == NULL) {
1507 dev_err(db->dev, "failed to ioremap data reg\n");
1508 ret = -EINVAL;
1509 goto out;
1510 }
1511
1512
1513 ndev->base_addr = (unsigned long)db->io_addr;
1514 ndev->irq = db->irq_res->start;
1515
1516
1517 dm9000_set_io(db, iosize);
1518
1519
1520 if (pdata != NULL) {
1521
1522
1523
1524 if (pdata->flags & DM9000_PLATF_8BITONLY)
1525 dm9000_set_io(db, 1);
1526
1527 if (pdata->flags & DM9000_PLATF_16BITONLY)
1528 dm9000_set_io(db, 2);
1529
1530 if (pdata->flags & DM9000_PLATF_32BITONLY)
1531 dm9000_set_io(db, 4);
1532
1533
1534
1535
1536 if (pdata->inblk != NULL)
1537 db->inblk = pdata->inblk;
1538
1539 if (pdata->outblk != NULL)
1540 db->outblk = pdata->outblk;
1541
1542 if (pdata->dumpblk != NULL)
1543 db->dumpblk = pdata->dumpblk;
1544
1545 db->flags = pdata->flags;
1546 }
1547
1548#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1549 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1550#endif
1551
1552
1553
1554
1555
1556
1557 iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
1558
1559
1560 for (i = 0; i < 8; i++) {
1561 id_val = ior(db, DM9000_VIDL);
1562 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1563 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1564 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1565
1566 if (id_val == DM9000_ID)
1567 break;
1568 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1569 }
1570
1571 if (id_val != DM9000_ID) {
1572 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1573 ret = -ENODEV;
1574 goto out;
1575 }
1576
1577
1578
1579 id_val = ior(db, DM9000_CHIPR);
1580 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1581
1582 switch (id_val) {
1583 case CHIPR_DM9000A:
1584 db->type = TYPE_DM9000A;
1585 break;
1586 case CHIPR_DM9000B:
1587 db->type = TYPE_DM9000B;
1588 break;
1589 default:
1590 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1591 db->type = TYPE_DM9000E;
1592 }
1593
1594
1595 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1596 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1597 ndev->features |= ndev->hw_features;
1598 }
1599
1600
1601
1602
1603 ether_setup(ndev);
1604
1605 ndev->netdev_ops = &dm9000_netdev_ops;
1606 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1607 ndev->ethtool_ops = &dm9000_ethtool_ops;
1608
1609 db->msg_enable = NETIF_MSG_LINK;
1610 db->mii.phy_id_mask = 0x1f;
1611 db->mii.reg_num_mask = 0x1f;
1612 db->mii.force_media = 0;
1613 db->mii.full_duplex = 0;
1614 db->mii.dev = ndev;
1615 db->mii.mdio_read = dm9000_phy_read;
1616 db->mii.mdio_write = dm9000_phy_write;
1617
1618 mac_src = "eeprom";
1619
1620
1621 for (i = 0; i < 6; i += 2)
1622 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1623
1624 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1625 mac_src = "platform data";
1626 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1627 }
1628
1629 if (!is_valid_ether_addr(ndev->dev_addr)) {
1630
1631
1632 mac_src = "chip";
1633 for (i = 0; i < 6; i++)
1634 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1635 }
1636
1637 if (!is_valid_ether_addr(ndev->dev_addr)) {
1638 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1639 "set using ifconfig\n", ndev->name);
1640
1641 eth_hw_addr_random(ndev);
1642 mac_src = "random";
1643 }
1644
1645
1646 platform_set_drvdata(pdev, ndev);
1647 ret = register_netdev(ndev);
1648
1649 if (ret == 0)
1650 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1651 ndev->name, dm9000_type_to_char(db->type),
1652 db->io_addr, db->io_data, ndev->irq,
1653 ndev->dev_addr, mac_src);
1654 return 0;
1655
1656out:
1657 dev_err(db->dev, "not found (%d).\n", ret);
1658
1659 dm9000_release_board(pdev, db);
1660 free_netdev(ndev);
1661
1662 return ret;
1663}
1664
1665static int
1666dm9000_drv_suspend(struct device *dev)
1667{
1668 struct platform_device *pdev = to_platform_device(dev);
1669 struct net_device *ndev = platform_get_drvdata(pdev);
1670 board_info_t *db;
1671
1672 if (ndev) {
1673 db = netdev_priv(ndev);
1674 db->in_suspend = 1;
1675
1676 if (!netif_running(ndev))
1677 return 0;
1678
1679 netif_device_detach(ndev);
1680
1681
1682 if (!db->wake_state)
1683 dm9000_shutdown(ndev);
1684 }
1685 return 0;
1686}
1687
1688static int
1689dm9000_drv_resume(struct device *dev)
1690{
1691 struct platform_device *pdev = to_platform_device(dev);
1692 struct net_device *ndev = platform_get_drvdata(pdev);
1693 board_info_t *db = netdev_priv(ndev);
1694
1695 if (ndev) {
1696 if (netif_running(ndev)) {
1697
1698
1699 if (!db->wake_state) {
1700 dm9000_reset(db);
1701 dm9000_init_dm9000(ndev);
1702 }
1703
1704 netif_device_attach(ndev);
1705 }
1706
1707 db->in_suspend = 0;
1708 }
1709 return 0;
1710}
1711
/* Power-management hooks wired into the platform driver below; only
 * plain suspend/resume are provided (no hibernate-specific entries). */
static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};
1716
1717static int
1718dm9000_drv_remove(struct platform_device *pdev)
1719{
1720 struct net_device *ndev = platform_get_drvdata(pdev);
1721
1722 unregister_netdev(ndev);
1723 dm9000_release_board(pdev, netdev_priv(ndev));
1724 free_netdev(ndev);
1725
1726 dev_dbg(&pdev->dev, "released and freed device\n");
1727 return 0;
1728}
1729
#ifdef CONFIG_OF
/* Devicetree match table: binds this driver to nodes with
 * compatible = "davicom,dm9000". */
static const struct of_device_id dm9000_of_matches[] = {
	{ .compatible = "davicom,dm9000", },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, dm9000_of_matches);
#endif
1737
/* Platform-driver glue: probe/remove entry points, PM ops and the
 * optional devicetree match table (compiled out without CONFIG_OF). */
static struct platform_driver dm9000_driver = {
	.driver	= {
		.name    = "dm9000",
		.owner	 = THIS_MODULE,
		.pm	 = &dm9000_drv_pm_ops,
		.of_match_table = of_match_ptr(dm9000_of_matches),
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};
1748
/* Registers dm9000_driver at module init and unregisters it at exit. */
module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");
1755