/*
 * Davicom DM9000 Fast Ethernet driver for Linux.
 *
 * Authors: Sascha Hauer, Ben Dooks (see MODULE_AUTHOR below).
 * Licensed under the GPL (see MODULE_LICENSE below).
 */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>

#include "dm9000.h"

#define DM9000_PHY	0x40

#define CARDNAME	"dm9000"
#define DRV_VERSION	"1.31"

static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

enum dm9000_type {
	TYPE_DM9000E,
	TYPE_DM9000A,
	TYPE_DM9000B
};

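/*
 * Per-device private data, stored in netdev_priv(ndev): the mapped
 * address/data windows, TX queue state, PHY/MII state and the resources
 * claimed from the platform device.
 */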
typedef struct board_info {

	void __iomem	*io_addr;
	void __iomem	*io_data;
	u16		irq;

	u16		tx_pkt_cnt;
	u16		queue_pkt_len;
	u16		queue_start_addr;
	u16		queue_ip_summed;
	u16		dbug_cnt;
	u8		io_mode;
	u8		phy_addr;
	u8		imr_all;

	unsigned int	flags;
	unsigned int	in_suspend:1;
	unsigned int	wake_supported:1;
	int		debug_level;

	enum dm9000_type type;

	void (*inblk)(void __iomem *port, void *data, int length);
	void (*outblk)(void __iomem *port, void *data, int length);
	void (*dumpblk)(void __iomem *port, int length);

	struct device	*dev;

	struct resource	*addr_res;
	struct resource	*data_res;
	struct resource	*addr_req;
	struct resource	*data_req;
	struct resource	*irq_res;

	int		irq_wake;

	struct mutex	addr_lock;

	struct delayed_work phy_poll;
	struct net_device *ndev;

	spinlock_t	lock;

	struct mii_if_info mii;
	u32		msg_enable;
	u32		wake_state;

	int		rx_csum;
	int		can_csum;
	int		ip_summed;
} board_info_t;

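/* Emit dev_dbg() only when the message level is below both the compile-time
 * CONFIG_DM9000_DEBUGLEVEL and the per-device debug_level. */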
#define dm9000_dbg(db, lev, msg...) do {		\
	if ((lev) < CONFIG_DM9000_DEBUGLEVEL &&		\
	    (lev) < db->debug_level) {			\
		dev_dbg(db->dev, msg);			\
	}						\
} while (0)

static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
	return netdev_priv(dev);
}

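/* Soft-reset the DM9000: select NCR through the address port, then write
 * NCR_RST through the data port, allowing 200us for each step. */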
static void
dm9000_reset(board_info_t *db)
{
	dev_dbg(db->dev, "resetting device\n");

	writeb(DM9000_NCR, db->io_addr);
	udelay(200);
	writeb(NCR_RST, db->io_data);
	udelay(200);
}

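/* Read a byte from I/O register "reg". */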
static u8
ior(board_info_t *db, int reg)
{
	writeb(reg, db->io_addr);
	return readb(db->io_data);
}

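/* Write a byte to I/O register "reg". */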
static void
iow(board_info_t *db, int reg, int value)
{
	writeb(reg, db->io_addr);
	writeb(value, db->io_data);
}

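/* Output routines: copy a block of data to the chip's data port, one
 * variant per bus width; the 16/32-bit variants round the count up. */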
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
	writesb(reg, data, count);
}

static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
	writesw(reg, data, (count + 1) >> 1);
}

static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
	writesl(reg, data, (count + 3) >> 2);
}

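/* Input routines: read a block of data from the chip's data port. */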
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
	readsb(reg, data, count);
}

static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
	readsw(reg, data, (count + 1) >> 1);
}

static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
	readsl(reg, data, (count + 3) >> 2);
}

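/* Dump routines: read and discard data from the chip's data port, used to
 * drop a packet that cannot be delivered. */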
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	for (i = 0; i < count; i++)
		tmp = readb(reg);
}

static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 1) >> 1;

	for (i = 0; i < count; i++)
		tmp = readw(reg);
}

static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
	int i;
	int tmp;

	count = (count + 3) >> 2;

	for (i = 0; i < count; i++)
		tmp = readl(reg);
}

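/* Select the block I/O routines matching the data-port width in bytes.
 * A 3-byte width is not supported and falls back to 16-bit access. */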
static void dm9000_set_io(struct board_info *db, int byte_width)
{
	switch (byte_width) {
	case 1:
		db->dumpblk = dm9000_dumpblk_8bit;
		db->outblk  = dm9000_outblk_8bit;
		db->inblk   = dm9000_inblk_8bit;
		break;

	case 3:
		dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
	case 2:
		db->dumpblk = dm9000_dumpblk_16bit;
		db->outblk  = dm9000_outblk_16bit;
		db->inblk   = dm9000_inblk_16bit;
		break;

	case 4:
	default:
		db->dumpblk = dm9000_dumpblk_32bit;
		db->outblk  = dm9000_outblk_32bit;
		db->inblk   = dm9000_inblk_32bit;
		break;
	}
}

static void dm9000_schedule_poll(board_info_t *db)
{
	if (db->type == TYPE_DM9000E)
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}

static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&db->lock, flags);
	ret = ior(db, reg);
	spin_unlock_irqrestore(&db->lock, flags);

	return ret;
}

static int dm9000_wait_eeprom(board_info_t *db)
{
	unsigned int status;
	int timeout = 8;

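	/* Poll EPCR until the ERRE bit clears, sleeping 1ms per iteration
	 * and giving up after a small number of tries so we never spin
	 * here indefinitely. */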
	while (1) {
		status = dm9000_read_locked(db, DM9000_EPCR);

		if ((status & EPCR_ERRE) == 0)
			break;

		msleep(1);

		if (timeout-- < 0) {
			dev_dbg(db->dev, "timeout waiting EEPROM\n");
			break;
		}
	}

	return 0;
}

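/* Read one 16-bit word from the EEPROM at "offset" into to[0]/to[1]. */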
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM) {
		to[0] = 0xff;
		to[1] = 0xff;
		return;
	}

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPCR, EPCR_ERPRR);

	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	msleep(1);

	spin_lock_irqsave(&db->lock, flags);

	iow(db, DM9000_EPCR, 0x0);

	to[0] = ior(db, DM9000_EPDRL);
	to[1] = ior(db, DM9000_EPDRH);

	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

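/* Write the 16-bit word data[0]/data[1] to the EEPROM at "offset". */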
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
	unsigned long flags;

	if (db->flags & DM9000_PLATF_NO_EEPROM)
		return;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPAR, offset);
	iow(db, DM9000_EPDRH, data[1]);
	iow(db, DM9000_EPDRL, data[0]);
	iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_wait_eeprom(db);

	mdelay(1);

	spin_lock_irqsave(&db->lock, flags);
	iow(db, DM9000_EPCR, 0);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);
}

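/* ethtool operations */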
static void dm9000_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	board_info_t *dm = to_dm9000_board(dev);

	strcpy(info->driver, CARDNAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, to_platform_device(dm->dev)->name);
}

static u32 dm9000_get_msglevel(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);

	return dm->msg_enable;
}

static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
	board_info_t *dm = to_dm9000_board(dev);

	dm->msg_enable = value;
}

static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	mii_ethtool_gset(&dm->mii, cmd);
	return 0;
}

static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	board_info_t *dm = to_dm9000_board(dev);

	return mii_ethtool_sset(&dm->mii, cmd);
}

static int dm9000_nway_reset(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return mii_nway_restart(&dm->mii);
}

static uint32_t dm9000_get_rx_csum(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	return dm->rx_csum;
}

static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (dm->can_csum) {
		dm->rx_csum = data;
		iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);

		return 0;
	}

	return -EOPNOTSUPP;
}

static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dm->lock, flags);
	ret = dm9000_set_rx_csum_unlocked(dev, data);
	spin_unlock_irqrestore(&dm->lock, flags);

	return ret;
}

static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int ret = -EOPNOTSUPP;

	if (dm->can_csum)
		ret = ethtool_op_set_tx_csum(dev, data);
	return ret;
}

static u32 dm9000_get_link(struct net_device *dev)
{
	board_info_t *dm = to_dm9000_board(dev);
	u32 ret;

	if (dm->flags & DM9000_PLATF_EXT_PHY)
		ret = mii_link_ok(&dm->mii);
	else
		ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;

	return ret;
}

#define DM_EEPROM_MAGIC		(0x444D394B)

static int dm9000_get_eeprom_len(struct net_device *dev)
{
	return 128;
}

static int dm9000_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	ee->magic = DM_EEPROM_MAGIC;

	for (i = 0; i < len; i += 2)
		dm9000_read_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static int dm9000_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	board_info_t *dm = to_dm9000_board(dev);
	int offset = ee->offset;
	int len = ee->len;
	int i;

	if ((len & 1) != 0 || (offset & 1) != 0)
		return -EINVAL;

	if (dm->flags & DM9000_PLATF_NO_EEPROM)
		return -ENOENT;

	if (ee->magic != DM_EEPROM_MAGIC)
		return -EINVAL;

	for (i = 0; i < len; i += 2)
		dm9000_write_eeprom(dm, (offset + i) / 2, data + i);

	return 0;
}

static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);

	memset(w, 0, sizeof(struct ethtool_wolinfo));

	w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
	w->wolopts = dm->wake_state;
}

static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	board_info_t *dm = to_dm9000_board(dev);
	unsigned long flags;
	u32 opts = w->wolopts;
	u32 wcr = 0;

	if (!dm->wake_supported)
		return -EOPNOTSUPP;

	if (opts & ~WAKE_MAGIC)
		return -EINVAL;

	if (opts & WAKE_MAGIC)
		wcr |= WCR_MAGICEN;

	mutex_lock(&dm->addr_lock);

	spin_lock_irqsave(&dm->lock, flags);
	iow(dm, DM9000_WCR, wcr);
	spin_unlock_irqrestore(&dm->lock, flags);

	mutex_unlock(&dm->addr_lock);

	if (dm->wake_state != opts) {
		if (!dm->wake_state)
			irq_set_irq_wake(dm->irq_wake, 1);
		else if (dm->wake_state && !opts)
			irq_set_irq_wake(dm->irq_wake, 0);
	}

	dm->wake_state = opts;
	return 0;
}

static const struct ethtool_ops dm9000_ethtool_ops = {
	.get_drvinfo		= dm9000_get_drvinfo,
	.get_settings		= dm9000_get_settings,
	.set_settings		= dm9000_set_settings,
	.get_msglevel		= dm9000_get_msglevel,
	.set_msglevel		= dm9000_set_msglevel,
	.nway_reset		= dm9000_nway_reset,
	.get_link		= dm9000_get_link,
	.get_wol		= dm9000_get_wol,
	.set_wol		= dm9000_set_wol,
	.get_eeprom_len		= dm9000_get_eeprom_len,
	.get_eeprom		= dm9000_get_eeprom,
	.set_eeprom		= dm9000_set_eeprom,
	.get_rx_csum		= dm9000_get_rx_csum,
	.set_rx_csum		= dm9000_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= dm9000_set_tx_csum,
};

static void dm9000_show_carrier(board_info_t *db,
				unsigned carrier, unsigned nsr)
{
	struct net_device *ndev = db->ndev;
	unsigned ncr = dm9000_read_locked(db, DM9000_NCR);

	if (carrier)
		dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
			 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
			 (ncr & NCR_FDX) ? "full" : "half");
	else
		dev_info(db->dev, "%s: link down\n", ndev->name);
}

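/* Delayed work that polls link state: with a simple internal PHY the NSR
 * link bit is checked directly, otherwise the generic MII helpers are used.
 * Re-arms itself via dm9000_schedule_poll() while the interface is running. */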
static void
dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);
	struct net_device *ndev = db->ndev;

	if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
	    !(db->flags & DM9000_PLATF_EXT_PHY)) {
		unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
		unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
		unsigned new_carrier;

		new_carrier = (nsr & NSR_LINKST) ? 1 : 0;

		if (old_carrier != new_carrier) {
			if (netif_msg_link(db))
				dm9000_show_carrier(db, new_carrier, nsr);

			if (!new_carrier)
				netif_carrier_off(ndev);
			else
				netif_carrier_on(ndev);
		}
	} else
		mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(ndev))
		dm9000_schedule_poll(db);
}

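/* Unmap the I/O windows and release the memory regions claimed in probe. */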
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
	iounmap(db->io_addr);
	iounmap(db->io_data);

	release_resource(db->data_req);
	kfree(db->data_req);

	release_resource(db->addr_req);
	kfree(db->addr_req);
}

static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
	switch (type) {
	case TYPE_DM9000E: return 'e';
	case TYPE_DM9000A: return 'a';
	case TYPE_DM9000B: return 'b';
	}

	return '?';
}

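/* Program the station address, multicast hash table and receive control
 * register; the caller is responsible for locking. */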
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, oft;
	u32 hash_val;
	u16 hash_table[4];
	u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
		iow(db, oft, dev->dev_addr[i]);

	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	hash_table[3] = 0x8000;

	if (dev->flags & IFF_PROMISC)
		rcr |= RCR_PRMSC;

	if (dev->flags & IFF_ALLMULTI)
		rcr |= RCR_ALL;

	netdev_for_each_mc_addr(ha, dev) {
		hash_val = ether_crc_le(6, ha->addr) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	for (i = 0, oft = DM9000_MAR; i < 4; i++) {
		iow(db, oft++, hash_table[i]);
		iow(db, oft++, hash_table[i] >> 8);
	}

	iow(db, DM9000_RCR, rcr);
}

static void
dm9000_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	dm9000_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}

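/* Initialise the chip after reset: detect the I/O mode, restore the RX
 * checksum setting, configure the PHY-control GPIO, program the control
 * registers, clear pending status, reload the address filter and unmask
 * interrupts. */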
static void
dm9000_init_dm9000(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned int imr;
	unsigned int ncr;

	dm9000_dbg(db, 1, "entering %s\n", __func__);

	db->io_mode = ior(db, DM9000_ISR) >> 6;

	dm9000_set_rx_csum_unlocked(dev, db->rx_csum);

	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);

	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;

	if (db->wake_supported)
		ncr |= NCR_WAKEEN;

	iow(db, DM9000_NCR, ncr);

	iow(db, DM9000_TCR, 0);
	iow(db, DM9000_BPTR, 0x3f);
	iow(db, DM9000_FCR, 0xff);
	iow(db, DM9000_SMCR, 0);

	iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
	iow(db, DM9000_ISR, ISR_CLR_STATUS);

	dm9000_hash_table_unlocked(dev);

	imr = IMR_PAR | IMR_PTM | IMR_PRM;
	if (db->type != TYPE_DM9000E)
		imr |= IMR_LNKCHNG;

	db->imr_all = imr;

	iow(db, DM9000_IMR, imr);

	db->tx_pkt_cnt = 0;
	db->queue_pkt_len = 0;
	dev->trans_start = jiffies;
}

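/* Transmit watchdog expired: reset and re-initialise the chip and restart
 * the queue, preserving the currently selected register. */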
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	iow(dm, DM9000_TCR, TCR_TXREQ);
}

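/* Hard transmit entry point: copy the packet into the chip's TX SRAM.
 * The first packet is sent immediately; a second may be queued in SRAM,
 * in which case the software queue is stopped until TX completion. */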
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	board_info_t *db = netdev_priv(dev);

	dm9000_dbg(db, 3, "%s:\n", __func__);

	if (db->tx_pkt_cnt > 1)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&db->lock, flags);

	writeb(DM9000_MWCMD, db->io_addr);

	(db->outblk)(db->io_data, skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_pkt_cnt++;

	if (db->tx_pkt_cnt == 1) {
		dm9000_send_packet(dev, skb->ip_summed, skb->len);
	} else {
		db->queue_pkt_len = skb->len;
		db->queue_ip_summed = skb->ip_summed;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

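/* TX-complete handling: account the finished packet, start any packet
 * still queued in TX SRAM and wake the software queue.  Called from the
 * interrupt handler with db->lock held. */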
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
	int tx_status = ior(db, DM9000_NSR);

	if (tx_status & (NSR_TX2END | NSR_TX1END)) {
		db->tx_pkt_cnt--;
		dev->stats.tx_packets++;

		if (netif_msg_tx_done(db))
			dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);

		if (db->tx_pkt_cnt > 0)
			dm9000_send_packet(dev, db->queue_ip_summed,
					   db->queue_pkt_len);
		netif_wake_queue(dev);
	}
}

struct dm9000_rxhdr {
	u8	RxPktReady;
	u8	RxStatus;
	__le16	RxLen;
} __packed;

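/* Receive path: drain every packet currently ready in the RX SRAM.  Each
 * packet is preceded by a 4-byte header (ready flag, status, length);
 * bad packets and allocation failures are discarded via dumpblk. */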
static void
dm9000_rx(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	struct dm9000_rxhdr rxhdr;
	struct sk_buff *skb;
	u8 rxbyte, *rdptr;
	bool GoodPacket;
	int RxLen;

	do {
		ior(db, DM9000_MRCMDX);

		rxbyte = readb(db->io_data);

		if (rxbyte & DM9000_PKT_ERR) {
			dev_warn(db->dev, "status check fail: %d\n", rxbyte);
			iow(db, DM9000_RCR, 0x00);
			iow(db, DM9000_ISR, IMR_PAR);
			return;
		}

		if (!(rxbyte & DM9000_PKT_RDY))
			return;

		GoodPacket = true;
		writeb(DM9000_MRCMD, db->io_addr);

		(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));

		RxLen = le16_to_cpu(rxhdr.RxLen);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxhdr.RxStatus, RxLen);

		if (RxLen < 0x40) {
			GoodPacket = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (RxLen > DM9000_PKT_MAX)
			dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);

		if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
				      RSR_PLE | RSR_RWTO |
				      RSR_LCS | RSR_RF)) {
			GoodPacket = false;
			if (rxhdr.RxStatus & RSR_FOE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "fifo error\n");
				dev->stats.rx_fifo_errors++;
			}
			if (rxhdr.RxStatus & RSR_CE) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}
			if (rxhdr.RxStatus & RSR_RF) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		if (GoodPacket &&
		    ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
			skb_reserve(skb, 2);
			rdptr = (u8 *) skb_put(skb, RxLen - 4);

			(db->inblk)(db->io_data, rdptr, RxLen);
			dev->stats.rx_bytes += RxLen;

			skb->protocol = eth_type_trans(skb, dev);
			if (db->rx_csum) {
				if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);
			}
			netif_rx(skb);
			dev->stats.rx_packets++;

		} else {
			(db->dumpblk)(db->io_data, RxLen);
		}
	} while (rxbyte & DM9000_PKT_RDY);
}

static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	int int_status;
	unsigned long flags;
	u8 reg_save;

	dm9000_dbg(db, 3, "entering %s\n", __func__);

	spin_lock_irqsave(&db->lock, flags);

	reg_save = readb(db->io_addr);

	iow(db, DM9000_IMR, IMR_PAR);

	int_status = ior(db, DM9000_ISR);
	iow(db, DM9000_ISR, int_status);

	if (netif_msg_intr(db))
		dev_dbg(db->dev, "interrupt status %02x\n", int_status);

	if (int_status & ISR_PRS)
		dm9000_rx(dev);

	if (int_status & ISR_PTS)
		dm9000_tx_done(dev, db);

	if (db->type != TYPE_DM9000E) {
		if (int_status & ISR_LNKCHNG)
			schedule_delayed_work(&db->phy_poll, 1);
	}

	iow(db, DM9000_IMR, db->imr_all);

	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned nsr, wcr;

	spin_lock_irqsave(&db->lock, flags);

	nsr = ior(db, DM9000_NSR);
	wcr = ior(db, DM9000_WCR);

	dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);

	if (nsr & NSR_WAKEST) {
		iow(db, DM9000_NSR, NSR_WAKEST);

		if (wcr & WCR_LINKST)
			dev_info(db->dev, "wake by link status change\n");
		if (wcr & WCR_SAMPLEST)
			dev_info(db->dev, "wake by sample packet\n");
		if (wcr & WCR_MAGICST)
			dev_info(db->dev, "wake by magic packet\n");
		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
			dev_err(db->dev, "wake signalled with no reason? "
				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void dm9000_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	dm9000_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

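/* net_device open: request the interrupt, power up the internal PHY via
 * GPR, reset and initialise the chip, then start the queue and the link
 * poll. */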
static int
dm9000_open(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;

	if (netif_msg_ifup(db))
		dev_dbg(db->dev, "enabling %s\n", dev->name);

	if (irqflags == IRQF_TRIGGER_NONE)
		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");

	irqflags |= IRQF_SHARED;

	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;

	iow(db, DM9000_GPR, 0);
	mdelay(1);

	dm9000_reset(db);
	dm9000_init_dm9000(dev);

	db->dbug_cnt = 0;

	mii_check_media(&db->mii, netif_msg_link(db), 1);
	netif_start_queue(dev);

	dm9000_schedule_poll(db);

	return 0;
}

static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
	if (db->in_suspend)
		mdelay(ms);
	else
		msleep(ms);
}

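/* Read a PHY register through the EPCR/EPAR/EPDR interface.  Holds
 * addr_lock for the whole operation and takes the spinlock only around
 * the individual register accesses. */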
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned int reg_save;
	int ret;

	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);

	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	mutex_unlock(&db->addr_lock);

	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
	return ret;
}

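/* Write a PHY register, mirroring dm9000_phy_read(). */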
static void
dm9000_phy_write(struct net_device *dev,
		 int phyaddr_unused, int reg, int value)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;
	unsigned long reg_save;

	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
	mutex_lock(&db->addr_lock);

	spin_lock_irqsave(&db->lock, flags);

	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPAR, DM9000_PHY | reg);

	iow(db, DM9000_EPDRL, value);
	iow(db, DM9000_EPDRH, value >> 8);

	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);

	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);

	dm9000_msleep(db, 1);

	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	iow(db, DM9000_EPCR, 0x0);

	writeb(reg_save, db->io_addr);

	spin_unlock_irqrestore(&db->lock, flags);
	mutex_unlock(&db->addr_lock);
}

static void
dm9000_shutdown(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);

	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
	iow(db, DM9000_GPR, 0x01);
	iow(db, DM9000_IMR, IMR_PAR);
	iow(db, DM9000_RCR, 0x00);
}

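/* net_device stop: cancel the link poll, stop the queue, free the IRQ and
 * power the chip down via dm9000_shutdown(). */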
static int
dm9000_stop(struct net_device *ndev)
{
	board_info_t *db = netdev_priv(ndev);

	if (netif_msg_ifdown(db))
		dev_dbg(db->dev, "shutting down %s\n", ndev->name);

	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	free_irq(ndev->irq, ndev);

	dm9000_shutdown(ndev);

	return 0;
}

static const struct net_device_ops dm9000_netdev_ops = {
	.ndo_open		= dm9000_open,
	.ndo_stop		= dm9000_stop,
	.ndo_start_xmit		= dm9000_start_xmit,
	.ndo_tx_timeout		= dm9000_timeout,
	.ndo_set_multicast_list	= dm9000_hash_table,
	.ndo_do_ioctl		= dm9000_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dm9000_poll_controller,
#endif
};

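/* Probe: map the address/data windows, optionally claim a wake-up IRQ,
 * verify the vendor/product ID, determine the chip revision, choose a MAC
 * address (EEPROM, platform data, chip registers or random) and register
 * the net_device. */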
static int __devinit
dm9000_probe(struct platform_device *pdev)
{
	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
	struct board_info *db;
	struct net_device *ndev;
	const unsigned char *mac_src;
	int ret = 0;
	int iosize;
	int i;
	u32 id_val;

	ndev = alloc_etherdev(sizeof(struct board_info));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "dm9000_probe()\n");

	db = netdev_priv(ndev);

	db->dev = &pdev->dev;
	db->ndev = ndev;

	spin_lock_init(&db->lock);
	mutex_init(&db->addr_lock);

	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);

	db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	db->irq_res  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	if (db->addr_res == NULL || db->data_res == NULL ||
	    db->irq_res == NULL) {
		dev_err(db->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto out;
	}

	db->irq_wake = platform_get_irq(pdev, 1);
	if (db->irq_wake >= 0) {
		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);

		ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
				  IRQF_SHARED, dev_name(db->dev), ndev);
		if (ret) {
			dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
		} else {
			ret = irq_set_irq_wake(db->irq_wake, 1);
			if (ret) {
				dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
					db->irq_wake, ret);
				ret = 0;
			} else {
				irq_set_irq_wake(db->irq_wake, 0);
				db->wake_supported = 1;
			}
		}
	}

	iosize = resource_size(db->addr_res);
	db->addr_req = request_mem_region(db->addr_res->start, iosize,
					  pdev->name);

	if (db->addr_req == NULL) {
		dev_err(db->dev, "cannot claim address reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_addr = ioremap(db->addr_res->start, iosize);

	if (db->io_addr == NULL) {
		dev_err(db->dev, "failed to ioremap address reg\n");
		ret = -EINVAL;
		goto out;
	}

	iosize = resource_size(db->data_res);
	db->data_req = request_mem_region(db->data_res->start, iosize,
					  pdev->name);

	if (db->data_req == NULL) {
		dev_err(db->dev, "cannot claim data reg area\n");
		ret = -EIO;
		goto out;
	}

	db->io_data = ioremap(db->data_res->start, iosize);

	if (db->io_data == NULL) {
		dev_err(db->dev, "failed to ioremap data reg\n");
		ret = -EINVAL;
		goto out;
	}

	ndev->base_addr = (unsigned long)db->io_addr;
	ndev->irq = db->irq_res->start;

	dm9000_set_io(db, iosize);

	if (pdata != NULL) {
		if (pdata->flags & DM9000_PLATF_8BITONLY)
			dm9000_set_io(db, 1);

		if (pdata->flags & DM9000_PLATF_16BITONLY)
			dm9000_set_io(db, 2);

		if (pdata->flags & DM9000_PLATF_32BITONLY)
			dm9000_set_io(db, 4);

		if (pdata->inblk != NULL)
			db->inblk = pdata->inblk;

		if (pdata->outblk != NULL)
			db->outblk = pdata->outblk;

		if (pdata->dumpblk != NULL)
			db->dumpblk = pdata->dumpblk;

		db->flags = pdata->flags;
	}

#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
	db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif

	dm9000_reset(db);

	for (i = 0; i < 8; i++) {
		id_val  = ior(db, DM9000_VIDL);
		id_val |= (u32)ior(db, DM9000_VIDH) << 8;
		id_val |= (u32)ior(db, DM9000_PIDL) << 16;
		id_val |= (u32)ior(db, DM9000_PIDH) << 24;

		if (id_val == DM9000_ID)
			break;
		dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
	}

	if (id_val != DM9000_ID) {
		dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
		ret = -ENODEV;
		goto out;
	}

	id_val = ior(db, DM9000_CHIPR);
	dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);

	switch (id_val) {
	case CHIPR_DM9000A:
		db->type = TYPE_DM9000A;
		break;
	case CHIPR_DM9000B:
		db->type = TYPE_DM9000B;
		break;
	default:
		dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
		db->type = TYPE_DM9000E;
	}

	if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
		db->can_csum = 1;
		db->rx_csum = 1;
		ndev->features |= NETIF_F_IP_CSUM;
	}

	ether_setup(ndev);

	ndev->netdev_ops	= &dm9000_netdev_ops;
	ndev->watchdog_timeo	= msecs_to_jiffies(watchdog);
	ndev->ethtool_ops	= &dm9000_ethtool_ops;

	db->msg_enable       = NETIF_MSG_LINK;
	db->mii.phy_id_mask  = 0x1f;
	db->mii.reg_num_mask = 0x1f;
	db->mii.force_media  = 0;
	db->mii.full_duplex  = 0;
	db->mii.dev	     = ndev;
	db->mii.mdio_read    = dm9000_phy_read;
	db->mii.mdio_write   = dm9000_phy_write;

	mac_src = "eeprom";

	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr + i);

	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
		mac_src = "platform data";
		memcpy(ndev->dev_addr, pdata->dev_addr, 6);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		mac_src = "chip";
		for (i = 0; i < 6; i++)
			ndev->dev_addr[i] = ior(db, i + DM9000_PAR);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

		random_ether_addr(ndev->dev_addr);
		mac_src = "random";
	}

	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0)
		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
		       ndev->name, dm9000_type_to_char(db->type),
		       db->io_addr, db->io_data, ndev->irq,
		       ndev->dev_addr, mac_src);
	return 0;

out:
	dev_err(db->dev, "not found (%d).\n", ret);

	dm9000_release_board(pdev, db);
	free_netdev(ndev);

	return ret;
}

static int
dm9000_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db;

	if (ndev) {
		db = netdev_priv(ndev);
		db->in_suspend = 1;

		if (!netif_running(ndev))
			return 0;

		netif_device_detach(ndev);

		if (!db->wake_state)
			dm9000_shutdown(ndev);
	}
	return 0;
}

static int
dm9000_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	board_info_t *db = netdev_priv(ndev);

	if (ndev) {
		if (netif_running(ndev)) {
			if (!db->wake_state) {
				dm9000_reset(db);
				dm9000_init_dm9000(ndev);
			}

			netif_device_attach(ndev);
		}

		db->in_suspend = 0;
	}
	return 0;
}

static const struct dev_pm_ops dm9000_drv_pm_ops = {
	.suspend	= dm9000_drv_suspend,
	.resume		= dm9000_drv_resume,
};

static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	dm9000_release_board(pdev, netdev_priv(ndev));
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}

static struct platform_driver dm9000_driver = {
	.driver	= {
		.name	= "dm9000",
		.owner	= THIS_MODULE,
		.pm	= &dm9000_drv_pm_ops,
	},
	.probe   = dm9000_probe,
	.remove  = __devexit_p(dm9000_drv_remove),
};

static int __init
dm9000_init(void)
{
	printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);

	return platform_driver_register(&dm9000_driver);
}

static void __exit
dm9000_cleanup(void)
{
	platform_driver_unregister(&dm9000_driver);
}

module_init(dm9000_init);
module_exit(dm9000_cleanup);

MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");