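/*
 * Davicom DM9000 Fast Ethernet driver for Linux (platform device version).
 */
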
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

#define DM9000_PHY	0x40	/* flag ORed into EPAR to select PHY registers */

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

/* transmit timeout, default 5 seconds */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/* debug level: 0 is quiet, higher values get progressively noisier */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");

enum dm9000_type {
	TYPE_DM9000E,
	TYPE_DM9000A,
	TYPE_DM9000B
};

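/*
 * Per-device driver state.  io_addr and io_data are the two mapped
 * register windows (index and data); the inblk/outblk/dumpblk hooks are
 * filled in for the detected bus width by dm9000_set_io().
 */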
typedef struct board_info {

	void __iomem	*io_addr;
	void __iomem	*io_data;
	u16		irq;

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;

	struct resource	*addr_res;
	struct resource	*data_res;
	struct resource	*addr_req;
	struct resource	*data_req;
	struct resource	*irq_res;

	int		irq_wake;

	struct mutex	addr_lock;

	struct delayed_work phy_poll;
	struct net_device  *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		ip_summed;
} board_info_t;

#define dm9000_dbg(db, lev, msg...) do {	\
	if ((lev) < debug) {			\
		dev_dbg(db->dev, msg);		\
	}					\
} while (0)

static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

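/*
 * Low-level register access: the chip is driven through two locations,
 * an index port (io_addr) and a data port (io_data).  A register is
 * selected by writing its offset to the index port and is then read or
 * written through the data port.
 */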
static void
dm9000_reset(board_info_t *db)
{
	dev_dbg(db->dev, "resetting device\n");

	writeb(DM9000_NCR, db->io_addr);
	udelay(200);
	writeb(NCR_RST, db->io_data);
	udelay(200);
}

static u8
ior(board_info_t *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

static void
iow(board_info_t *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

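/* Block I/O in 8, 16 and 32 bit widths for the packet data window. */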
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	iowrite8_rep(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	iowrite16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	iowrite32_rep(reg, data, (count+3) >> 2);
}

static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	ioread8_rep(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	ioread16_rep(reg, data, (count+1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	ioread32_rep(reg, data, (count+3) >> 2);
}

static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}

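/*
 * Sleep for ms milliseconds; while the device is marked in_suspend a
 * busy-wait is used instead, presumably because sleeping is not safe at
 * that point.  The PHY read/write routines below go through the shared
 * EPAR/EPCR/EPDRL/EPDRH register set.
 */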
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend)
		mdelay(ms);
	else
		msleep(ms);
}

static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* save the current register selection */
	reg_save = readb(db->io_addr);

	/* select the PHY register via EPAR */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* issue the PHY read command */
	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* wait for the read to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* clear the read command */

	/* the result is returned in EPDRH/EPDRL */
	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	/* restore the previous register selection */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	/* save the current register selection */
	reg_save = readb(db->io_addr);

	/* select the PHY register via EPAR */
	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	/* load the value into EPDRL/EPDRH */
	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	/* issue the PHY write command */
	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);		/* wait for the write to complete */

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);	/* clear the write command */

	/* restore the previous register selection */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	mutex_unlock(&db->addr_lock);
}

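/* Select the block I/O routines that match the bus width (in bytes). */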
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
		/* fall through */
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(board_info_t *db)
{
	unsigned int status;
	int timeout = 8;

	/* wait for the EEPROM engine to go idle (EPCR_ERRE clear), with a
	 * bounded number of retries */
	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

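/*
 * EEPROM access: one 16-bit word at a time through EPAR/EPDRL/EPDRH,
 * guarded by addr_lock so other register-window users do not collide.
 */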
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

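/* ethtool support */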
static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
		sizeof(info->bus_info));
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_nway_restart(&dm->mii);
}

static int dm9000_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	board_info_t *dm = to_dm9000_board(dev);
	netdev_features_t changed = dev->features ^ features;
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
	spin_unlock_irqrestore(&dm->lock, flags);

	return 0;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	/* EEPROM access is in 16-bit words, so reject unaligned requests */
	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int done;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	while (len > 0) {
		if (len & 1 || offset & 1) {
			/* odd start or end: read-modify-write one word */
			int which = offset & 1;
			u8 tmp[2];

			dm9000_read_eeprom(dm, offset / 2, tmp);
			tmp[which] = *data;
			dm9000_write_eeprom(dm, offset / 2, tmp);

			done = 1;
		} else {
			dm9000_write_eeprom(dm, offset / 2, data);
			done = 2;
		}

		data += done;
		offset += done;
		len -= done;
	}

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	/* only magic-packet wake is advertised here */
	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		/* the wake state changed, so update the wake IRQ */
		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
};

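/*
 * Periodic link check.  DM9000E parts do not use the link-change
 * interrupt (see dm9000_init_dm9000), so dm9000_poll_work() is
 * rescheduled from dm9000_schedule_poll() while the interface is up.
 */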
static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	struct net_device *ndev = db->ndev;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier)
		dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half");
	else
		dev_info(db->dev, "%s: link down\n", ndev->name);
}

static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

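/* Release the register mappings and the memory regions claimed in probe. */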
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	iounmap(db->io_addr);
	iounmap(db->io_data);

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

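/*
 * Set the receive filter: program the station address (PAR), build the
 * 64-bit multicast hash table from the CRC of each multicast address
 * (with the broadcast bit always set), and write the resulting RCR.
 */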
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4];
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	hash_table[3] = 0x8000;

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

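/* (Re)initialise the dm9000: checksum offload, PHY, RX filter and interrupt mask. */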
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	/* record the I/O mode the chip reports */
	db->io_mode = ior(db, DM9000_ISR) >> 6;

	/* set the RX checksum engine to match the current feature set */
	if (dev->hw_features & NETIF_F_RXCSUM)
		iow(db, DM9000_RCSR,
			(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* GPIO0 under software control */

	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
	dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	/* keep wake events enabled while WoL is supported; DM9000_WCR
	 * provides the actual wake mask */
	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	iow(db, DM9000_TCR, 0);		/* clear TX control */
	iow(db, DM9000_BPTR, 0x3f);	/* back-pressure threshold */
	iow(db, DM9000_FCR, 0xff);	/* flow control */
	iow(db, DM9000_SMCR, 0);	/* special mode off */

	/* clear TX and wake status, then all interrupt status */
	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);

	/* set the address filter table */
	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	/* enable TX/RX interrupts */
	iow(db, DM9000_IMR, imr);

	/* reset driver TX state */
	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	netif_trans_update(dev);
}

/* Our watchdog timed out: reset and reinitialise the chip. */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* save the current register selection */
	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	/* we can accept TX packets again */
	netif_trans_update(dev);
	netif_wake_queue(dev);

	/* restore the previous register selection */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}

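/*
 * Transmit path.  The chip can hold two packets in its TX SRAM: the
 * first is sent immediately, a second is queued and the net queue is
 * stopped until dm9000_tx_done() reports completion.
 */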
static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* switch the hardware TX checksum engine on or off to match this
	 * packet */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* set the TX length */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* issue the TX request */
	iow(dm, DM9000_TCR, TCR_TXREQ);
}

static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	/* copy the packet data into the DM9000 TX SRAM */
	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;
	/* first packet is sent immediately, a second one is queued */
	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* the data has been copied to the chip, so free the skb */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

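/* TX-complete handling, called from the interrupt handler with db->lock held. */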
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		/* one packet has completed */
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		/* send any queued second packet and restart the queue */
		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

/* received packet header as stored in the DM9000 RX SRAM */
struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;

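/*
 * Receive loop: each packet in the RX SRAM is preceded by a 4-byte header
 * (ready flag, status, length).  Bad packets are counted and their data
 * discarded via dumpblk(); good ones are copied into a fresh skb and
 * handed to netif_rx().
 */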
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	/* check whether a packet is ready */
	do {
		ior(db, DM9000_MRCMDX);	/* dummy read */

		/* get the most up-to-date ready byte */
		rxbyte = readb(db->io_data);

		/* anything beyond the ready bit means the chip is confused:
		 * stop reception and interrupts */
		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);
			iow(db, DM9000_ISR, IMR_PAR);
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		/* a packet is ready: fetch its status and length */
		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX)
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);

		/* check the status byte for receive errors */
		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* move the data out of the DM9000 */
		if (GoodPacket &&
		    ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			/* read the received packet from the RX SRAM */
			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			/* pass the packet to the upper layers */
			skb->protocol = eth_type_trans(skb, dev);
			if (dev->features & NETIF_F_RXCSUM) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			/* drop the packet's data from the RX SRAM */
			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

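/*
 * Main interrupt handler: interrupts are masked, ISR is read and
 * acknowledged, and RX, TX-done and link-change events are serviced
 * before the interrupt mask is restored.
 */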
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	/* holders of db->lock must always block IRQs */
	spin_lock_irqsave(&db->lock, flags);

	/* save the current register selection */
	reg_save = readb(db->io_addr);

	/* disable all interrupts */
	iow(db, DM9000_IMR, IMR_PAR);

	/* read and acknowledge the interrupt status */
	int_status = ior(db, DM9000_ISR);
	iow(db, DM9000_ISR, int_status);

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	/* received a packet */
	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	/* transmit completed */
	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG) {
			/* fire a link-change check */
			schedule_delayed_work(&db->phy_poll, 1);
		}
	}

	/* re-enable the interrupt mask */
	iow(db, DM9000_IMR, db->imr_all);

	/* restore the previous register selection */
	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		/* clear the wake status bit */
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
 */
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

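/*
 * Open the interface: power up the chip, reset and initialise it, and
 * install the IRQ handler before starting the transmit queue.
 */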
static int
dm9000_open(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	/* if no IRQ trigger type was specified, warn and fall back to a
	 * shared IRQ with the default trigger */
	if (irqflags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irqflags |= IRQF_SHARED;

	/* power up the internal PHY via GPR and give it time to settle */
	iow(db, DM9000_GPR, 0);
	mdelay(1);

	/* initialise the DM9000 */
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;

	/* init driver variables */
	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	dm9000_schedule_poll(db);

	return 0;
}

static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	/* reset and power down the PHY, then disable interrupts and RX */
	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
	iow(db, DM9000_GPR, 0x01);
	iow(db, DM9000_IMR, IMR_PAR);
	iow(db, DM9000_RCR, 0x00);
}

static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* free the interrupt */
	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_rx_mode	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_features	= dm9000_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

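/*
 * Probe: map the address and data register windows, verify the chip's
 * vendor/product ID, apply any platform-data overrides, choose a MAC
 * address (EEPROM, platform data, chip registers, or random) and
 * register the network device.
 *
 * A board file would typically pass something like the following
 * (illustrative sketch only; the values shown here are made up):
 *
 *	static struct dm9000_plat_data example_dm9000_pdata = {
 *		.flags = DM9000_PLATF_16BITONLY,
 *	};
 */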
static int
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
	struct board_info *db;
	struct net_device *ndev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;

	/* init the network device */
	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	/* set up the board info structure */
	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (db->addr_res == NULL || db->data_res == NULL ||
	    db->irq_res == NULL) {
		dev_err(db->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {
			/* test whether the irq is really wakeup capable */
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	/* fill in parameters for the net-device structure */
	ndev->base_addr = (unsigned long)db->io_addr;
	ndev->irq = db->irq_res->start;

	/* ensure we have at least a default set of IO routines */
	dm9000_set_io(db, iosize);

	/* check whether anything is being overridden by platform data */
	if (pdata != NULL) {
		/* the board may want to force a particular IO width */
		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		/* the board may also supply its own IO routines */
		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	/* reset the chip (MAC loopback + reset) before reading the ID */
	iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);

	/* try the ID read several times in case early reads return garbage */
	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	/* identify which type of DM9000 we are working with */
	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	/* the DM9000A/B support hardware checksum offload */
	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		ndev->features |= ndev->hw_features;
	}

	/* from this point on we assume we have found a DM9000 */
	ether_setup(ndev);

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev          = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	/* try reading the MAC address from the attached EEPROM */
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from the chip's address registers */
		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

		eth_hw_addr_random(ndev);
		mac_src = "random";
	}

	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0)
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

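/* Power management: the chip is shut down on suspend unless Wake-on-LAN is armed. */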
static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		/* only shut the chip down if wake-on-LAN is not armed */
		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			/* reset if we were not waiting for a wake event */
			if (!db->wake_state) {
				dm9000_reset(db);
				dm9000_init_dm9000(ndev);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name	= "dm9000",
		.owner	= THIS_MODULE,
		.pm	= &dm9000_drv_pm_ops,
	},
	.probe   = dm9000_probe,
	.remove  = dm9000_drv_remove,
};

module_platform_driver(dm9000_driver);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");