/*
 * dmfe.c: A Davicom DM9102/DM9102A/DM9100/DM9009/DM9132 fast ethernet
 * driver for Linux, including support for the DM9801 (HomeRun) and
 * DM9802 (LongRun) HomePNA companion PHYs.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif

/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID	0x91321282	/* Davicom DM9132 ID */
#define PCI_DM9102_ID	0x91021282	/* Davicom DM9102 ID */
#define PCI_DM9100_ID	0x91001282	/* Davicom DM9100 ID */
#define PCI_DM9009_ID	0x90091282	/* Davicom DM9009 ID */

#define DM9102_IO_SIZE	0x80
#define DM9102A_IO_SIZE	0x100
#define TX_MAX_SEND_CNT	0x1		/* Maximum tx packets per round */
#define TX_DESC_CNT	0x10		/* Allocated Tx descriptors */
#define RX_DESC_CNT	0x20		/* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
#define DESC_ALL_CNT	(TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC	0x600
#define RX_ALLOC_SIZE	0x620
#define DM910X_RESET	1
#define CR0_DEFAULT	0x00E00000	/* TX & RX burst mode */
#define CR6_DEFAULT	0x00080000	/* HD */
#define CR7_DEFAULT	0x180c1
#define CR15_DEFAULT	0x06		/* TxJabber RxWatchdog */
#define TDES0_ERR_MASK	0x4302		/* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE	1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE	100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

#define DMFE_WOL_LINKCHANGE	0x20000000
#define DMFE_WOL_SAMPLEPACKET	0x10000000
#define DMFE_WOL_MAGICPACKET	0x08000000

#define DMFE_10MHF	0
#define DMFE_100MHF	1
#define DMFE_10MFD	4
#define DMFE_100MFD	5
#define DMFE_AUTO	8
#define DMFE_1M_HPNA	0x10

#define DMFE_TXTH_72	0x400000	/* TX TH 72 bytes */
#define DMFE_TXTH_96	0x404000	/* TX TH 96 bytes */
#define DMFE_TXTH_128	0x0000		/* TX TH 128 bytes */
#define DMFE_TXTH_256	0x4000		/* TX TH 256 bytes */
#define DMFE_TXTH_512	0x8000		/* TX TH 512 bytes */
#define DMFE_TXTH_1K	0xC000		/* TX TH 1K bytes */

#define DMFE_TIMER_WUT	(jiffies + HZ * 1)	/* timer wakeup time: 1 second */
#define DMFE_TX_TIMEOUT	((3 * HZ) / 2)		/* tx packet time-out: 1.5 s */
#define DMFE_TX_KICK	(HZ / 2)		/* tx packet kick-out: 0.5 s */

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

#define DMFE_DBUG(dbug_now, msg, value)			\
	do {						\
		if (dmfe_debug || (dbug_now))		\
			pr_err("%s %lx\n",		\
			       (msg), (long) (value));	\
	} while (0)

#define SHOW_MEDIA_TYPE(mode)				\
	pr_info("Change Speed to %sMbps %s duplex\n",	\
		(mode & 1) ? "100" : "10",		\
		(mode & 4) ? "full" : "half");

/* CR9 definition: SROM/MII */
#define CR9_SROM_READ	0x4800
#define CR9_SRCS	0x1
#define CR9_SRCLK	0x2
#define CR9_CRDOUT	0x8
#define SROM_DATA_0	0x0
#define SROM_DATA_1	0x4
#define PHY_DATA_1	0x20000
#define PHY_DATA_0	0x00000
#define MDCLKH		0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE	0x14

#define __CHK_IO_SIZE(pci_id, dev_rev) \
	((((pci_id) == PCI_DM9132_ID) || ((dev_rev) >= 0x30)) ? \
	 DM9102A_IO_SIZE : DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
		       (pci_dev)->revision))

#define DEVICE net_device

/* Structure/enum declaration -------------------------------- */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;		/* Data for us */
	struct tx_desc *next_tx_desc;
} __attribute__((aligned(32)));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;
} __attribute__((aligned(32)));

struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;

	void __iomem *ioaddr;		/* I/O base address */
	u32 cr0_data;
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointers for memory physical (DMA) addresses */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool, dword aligned */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointers */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool, dword aligned */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* incoming packet pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* waiting-to-send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count per timer interval */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved phyxcer register 4 value */

	u8 HPNA_present;		/* 0: none, 1: DM9801, 2: DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user-specified media mode */
	u8 op_mode;			/* real working media mode */
	u8 phy_addr;
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;

	/* Driver-defined statistic counters */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
};

enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};

enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variable declaration ------------------------------- */
static int printed_version;
static const char version[] =
	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameters */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special function: 1: VLAN, 2: RX Flow Control,
				   4: TX pause packet */

/* function declaration -------------------------------------- */
static int dmfe_open(struct DEVICE *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe(struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *);
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network board routine ------------------------------ */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};
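/*
 *	Search DM910X board, allocate space and register it
 */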
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 *	SPARC on-board DM910x chips should be handled by the main
	 *	tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable Master/IO access, Disable memory access */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev))) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0	/* Set Latency Timer 80h; left disabled because large values
	   reportedly break some chipsets. */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* IO region mapping */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ((pci_pmr == 0x10000) && (db->chip_revision == 0x31))
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* read 64 words of srom data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* Set Node address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev(dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);

	return err;
}

/*
 *	Unregister the net device and free all resources
 */
static void dmfe_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	if (dev) {
		unregister_netdev(dev);
		pci_iounmap(db->pdev, db->ioaddr);
		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
					db->desc_pool_dma_ptr);
		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
				    db->buf_pool_ptr, db->buf_pool_dma_ptr);
		pci_release_regions(pdev);
		free_netdev(dev);	/* free board information */
	}

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}
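/*
 *	Open the interface.
 *	The interface is opened whenever "ifconfig" activates it.
 */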
static int dmfe_open(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision */
	if (!chkmode || (db->chip_id == PCI_DM9132_ID) ||
	    (db->chip_revision >= 0x30)) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode = 4;	/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and activate the timer process */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
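/*	Initialize DM910X board:
 *	Reset DM910X board,
 *	initialize TX/Rx descriptor chain structure,
 *	send the setup frame,
 *	enable the Tx/Rx machine.
 */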
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM : media mode, HPNA parameters, etc. */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET phyxcer chip by GPR port bit 7 */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);		/* Delay 300 ms */
	}
	dw32(DCR12, 0x0);		/* Clear RESET signal */

	/* Process phyxcer media mode */
	if (!(db->media_mode & 0x10))	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media mode process */
	if (!(db->media_mode & DMFE_AUTO))
		db->op_mode = db->media_mode;	/* Force mode */

	/* Initialize transmit/receive descriptors and CR3/4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send the setup frame */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
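/*
 *	Hardware start transmission.
 *	Send a packet from the upper layer to the media.
 */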
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
				   struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Resource flag check */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check; this should not happen normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt */
	dw32(DCR7, 0);

	/* transmit this packet */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process */
	if ((!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT)) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
	}

	/* Tx resource check */
	if (db->tx_queue_cnt < TX_FREE_DESC_CNT)
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
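/*
 *	Stop the interface.
 *	The interface is stopped when it is brought down.
 */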
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete the timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffers */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counters */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}
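/*
 *	DM9102 interrupt handler:
 *	pass received packets to the upper layer, free transmitted packets.
 */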
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	if (!(db->cr5_data & 0xc1)) {
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupts in CR7 to solve the interrupt edge problem */
	dw32(DCR7, 0);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happened */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Receive the incoming packets */
	if ((db->cr5_data & 0x40) && db->rx_avail_cnt)
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffers */
	if (db->rx_avail_cnt < RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptors */
	if (db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable the interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void poll_dmfe(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	dmfe_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
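/*
 *	Free TX resources after transmit completion and
 *	gather the TX statistics.
 */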
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info *db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)
			break;

		/* A transmitted packet is completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counters */
		if (tdes0 != 0x7fffffff) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if (!(db->cr6_data & CR6_SFT)) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}

	/* Update TX remove pointer to the next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packets in queue */
	if ((db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if (db->tx_queue_cnt < TX_WAKE_DESC_CNT)
		netif_wake_queue(dev);	/* Activate upper layer, send again */
}
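/*
 *	Calculate the CRC value of the Rx packet
 *	flag =	1 : return the reverse CRC (for the received packet CRC)
 *		0 : return the normal CRC (for the Hash Table index)
 */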
static inline u32 cal_CRC(unsigned char *Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);

	if (flag)
		crc = ~crc;
	return crc;
}
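/*
 *	Receive the incoming packets and pass them to the upper layer
 */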
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ((rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last descriptor flags,
			   reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flags */
			rxlen = ((rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if (!(rdes0 & 0x8000) ||
			    ((db->cr6_data & CR6_PM) && (rxlen > 6))) {
				skb = rxptr->rx_skb_ptr;

				/* Received packet CRC check, if needed */
				if ((db->dm910x_chk_mode & 1) &&
				    (cal_CRC(skb->data, rxlen, 1) !=
				     (*(u32 *) (skb->data + rxlen)))) {
					/* Found a bad received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer;
					   short packets are copied to a new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
					    ((newskb = netdev_alloc_skb(dev, rxlen + 2))
					     != NULL)) {
						skb = newskb;
						skb_reserve(skb, 2);	/* 16-byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							skb_put(skb, rxlen),
							rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse the SKB buffer when the packet is bad */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}

/*
 *	Set DM910X multicast address
 */
static void dmfe_set_filter_mode(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}

/*
 *	Ethtool interface
 */
static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	db->wol_mode = wolinfo->wolopts;
	return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
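/*
 *	A periodic timer routine:
 *	dynamic media sense, hardware hang-up check and link status check.
 */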
static void dmfe_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when the link was OK before entering this routine */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id == PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}

	/* Operating Mode Check */
	if ((db->dm910x_chk_mode & 0x1) &&
	    (dev->stats.rx_packets > MAX_CHECK_PACKET))
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = dr32(DCR8);
	if ((db->interval_rx_cnt == 0) && (tmp_cr8)) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if (db->tx_packet_cnt &&
	    time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK)) {
		dw32(DCR1, 0x1);	/* Tx polling again */

		/* TX Timeout */
		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT)) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			dev_warn(&dev->dev, "Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if (((db->chip_id == PCI_DM9102_ID) &&
	     (db->chip_revision == 0x30)) ||
	    ((db->chip_id == PCI_DM9132_ID) &&
	     (db->chip_revision == 0x10))) {
		/* DM9102A Chip */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	} else
		/* 0x43 is used instead of 0x3 because bit 6 should represent
		   link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If the chip reports that the link failed, it could be because the
	   external PHY link status pin is not connected correctly to the
	   chip. To be sure, ask the PHY too. */

	/* need a dummy read because of the PHY's register latch */
	dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (dmfe_phy_read(db->ioaddr,
				     db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG(0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if (!link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For forced 10/100M half/full mode: enable auto-nego mode.
		   AUTO or forced 1M HomeRun/LongRun don't need it. */
		if (!(db->media_mode & 0x38))
			dmfe_phy_write(db->ioaddr, db->phy_addr,
				       0, 0x1000, db->chip_id);

		/* AUTO mode: if internal phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M Home-Net */
			db->cr6_data |= 0x00040000;	/* bit18=1, MII */
			db->cr6_data &= ~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link OK", tmp_cr12);

		/* Auto Sense Speed */
		if (!(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
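/*
 *	Dynamic reset of the DM910X board:
 *	stop the DM910X board,
 *	free the allocated Tx/Rx buffers,
 *	reset the DM910X board,
 *	re-initialize the DM910X board.
 */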
static void dmfe_dynamic_reset(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* Disable Interrupt */
	dw32(DCR5, dr32(DCR5));

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx allocated buffers */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}

/*
 *	free all allocated rx buffers
 */
static void dmfe_free_rxbuffer(struct dmfe_board_info *db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	/* free allocated rx buffers */
	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}

/*
 *	Reuse the SK buffer
 */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff *skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
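/*
 *	Initialize the transmit/receive descriptors,
 *	using a chain structure, and allocate the Tx/Rx buffers.
 */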
static void dmfe_descriptor_init(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */

	/* rx descriptor start pointer */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma = db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */

	/* Init Transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Wrap the last descriptor back to the head of the ring */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain */
	tmp_rx_dma = db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Wrap the last descriptor back to the head of the ring */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffers */
	allocate_rx_buffer(dev);
}
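/*
 *	Update CR6 value.
 *	First stop the DM910X Tx/Rx, then write the new value, then restart.
 */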
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	u32 cr6_tmp;

	cr6_tmp = cr6_data & ~0x2002;	/* stop Tx/Rx */
	dw32(DCR6, cr6_tmp);
	udelay(5);
	dw32(DCR6, cr6_data);
	udelay(5);
}

/*
 *	Send a setup frame for the DM9132.
 *	This setup frame initializes the DM910X address filter mode.
 */
static void dm9132_id_table(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	/* Node address */
	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);
		ioaddr += 4;
	}

	/* Clear Hash Table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast addresses in the Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to the MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
}
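/*
 *	Send a setup frame for the DM9102/DM9102A.
 *	This setup frame initializes the DM910X address filter mode.
 */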
static void send_filter_frame(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 *addrptr;
	u32 *suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fill the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource check, then send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
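/*
 *	Allocate Rx buffers, filling as many Rx descriptors as possible.
 */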
static void allocate_rx_buffer(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while (db->rx_avail_cnt < RX_DESC_CNT) {
		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
		if (skb == NULL)
			break;
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, skb->data,
				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}

static void srom_clk_write(void __iomem *ioaddr, u32 data)
{
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
		udelay(5);
	}
}
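/*
 *	Read one word of data from the serial ROM
 */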
static u16 read_srom_word(void __iomem *ioaddr, int offset)
{
	u16 srom_data;
	int i;

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Send the Read Command 110b */
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_0);

	/* Send the offset */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(ioaddr, srom_data);
	}

	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Clock in the 16 data bits */
	for (i = 16; i > 0; i--) {
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	return srom_data;
}

/*
 *	Auto sense the media mode
 */
static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ((phy_mode & 0x24) == 0x24) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132-MD */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 7, db->chip_id) & 0xf000;
		else					/* DM9102/DM9102A */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 17, db->chip_id) & 0xf000;
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
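/*
 *	Set 10/100 phyxcer capability
 *	AUTO mode  : phyxcer register 4 holds the NIC capability
 *	Force mode : phyxcer register 4 holds the forced media
 */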
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_reg;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 Chip: phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = dmfe_phy_read(db->ioaddr,
					db->phy_addr, 18, db->chip_id) & ~0x1000;

		dmfe_phy_write(db->ioaddr,
			       db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode */
		switch (db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID)
			phy_reg &= 0x61;
	}

	/* Write the new capability to phyxcer Reg4 */
	if (!(phy_reg & 0x01e0)) {
		phy_reg |= db->PHY_reg4;
		db->media_mode |= DMFE_AUTO;
	}
	dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation */
	if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if (!db->chip_type)
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
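/*
 *	Process the operating mode:
 *	AUTO mode  : PHY controller in auto-negotiation mode
 *	Force mode : PHY controller in force mode with a HUB,
 *		     N-way force capability with a SWITCH
 */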
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)			/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
	else
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode, if needed */
	if (!(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if (!(phy_reg & 0x1)) {
			/* partner without N-Way capability */
			phy_reg = 0x0;
			switch (db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
			if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
				mdelay(20);
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}

/*
 *	Write a word to a phy register
 */
static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
			   u16 phy_data, u32 chip_id)
{
	u16 i;

	if (chip_id == PCI_DM9132_ID) {
		dw16(0x80 + offset * 4, phy_data);
	} else {
		/* DM9102/DM9102A Chip */

		/* Send 33 synchronization clocks to the Phy controller */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command (01) to the Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send the Phy address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send the register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* write turnaround (10) */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word of data to the PHY controller */
		for (i = 0x8000; i > 0; i >>= 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
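/*
 *	Read a word of data from a phy register
 */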
static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip */
		phy_data = dr16(0x80 + offset * 4);
	} else {
		/* DM9102/DM9102A Chip */

		/* Send 33 synchronization clocks to the Phy controller */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command (10) to the Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send the Phy address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send the register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip the transition state */
		dmfe_phy_read_1bit(ioaddr);

		/* read 16 data bits */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= dmfe_phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}

/*
 *	Write one bit of data to the Phy Controller
 */
static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
	dw32(DCR9, phy_data);		/* MII Clock Low */
	udelay(1);
	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
	udelay(1);
	dw32(DCR9, phy_data);		/* MII Clock Low */
	udelay(1);
}

/*
 *	Read one bit of phy data from the PHY controller
 */
static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
{
	u16 phy_data;

	dw32(DCR9, 0x50000);
	udelay(1);
	phy_data = (dr32(DCR9) >> 19) & 0x1;
	dw32(DCR9, 0x40000);
	udelay(1);

	return phy_data;
}
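/*
 *	Parse the SROM contents: media mode, special function bits
 *	and HomePNA (DM9801/DM9802) parameters.
 */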
static void dmfe_parse_srom(struct dmfe_board_info *db)
{
	char *srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if (((int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */

		/* Get NIC supported media modes */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		db->PHY_reg4 = 0;
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch (db->NIC_capability & tmp_reg) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10 HD */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10 FD */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100 HD */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100 FD */
			}
		}

		/* Forced media mode check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		switch (dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break; /* HomePNA */
		}

		/* Special function settings */
		/* VLAN function */
		if ((SF_mode & 0x1) || (srom[43] & 0x80))
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ((SF_mode & 0x2) || (srom[40] & 0x1))
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ((SF_mode & 0x4) || (srom[40] & 0xe))
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameters */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch (HPNA_mode) {	/* Issue remote command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch (HPNA_mode) {	/* Don't issue remote command */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check whether a DM9801 or DM9802 is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ((tmp_reg & 0xfff0) == 0xb900) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if (dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}
}

/*
 *	Init the HomeRun DM9801
 */
static void dmfe_program_DM9801(struct dmfe_board_info *db, int HPNA_rev)
{
	uint reg17, reg25;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch (HPNA_rev) {
	case 0xb900:	/* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ((reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901:	/* DM9801 E4 */
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902:	/* DM9801 E5 */
	case 0xb903:	/* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}

/*
 *	Init the LongRun DM9802
 */
static void dmfe_program_DM9802(struct dmfe_board_info *db)
{
	uint phy_reg;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = (phy_reg & 0xff00) + HPNA_NoiseFloor;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}
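/*
 *	Check the remote HPNA power and speed status.
 *	If the remote power or speed differs from the local setting,
 *	re-program the phyxcer to match.
 */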
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *db)
{
	uint phy_reg;

	/* Get remote device status */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch (phy_reg) {
	case 0x00: phy_reg = 0x0a00; break;	/* LP/LS */
	case 0x20: phy_reg = 0x0900; break;	/* LP/HS */
	case 0x40: phy_reg = 0x0600; break;	/* HP/LS */
	case 0x60: phy_reg = 0x0500; break;	/* HP/HS */
	}

	/* Check whether remote device status matches our setting */
	if (phy_reg != (db->HPNA_command & 0x0f00)) {
		dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			       db->chip_id);
		db->HPNA_timer = 8;
	} else
		db->HPNA_timer = 600;	/* Match, check every 10 minutes */
}

static const struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);

#ifdef CONFIG_PM
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable Interrupt */
	dw32(DCR7, 0);
	dw32(DCR5, dr32(DCR5));

	/* Free Rx buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	u32 tmp;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart upper layer interface */
	netif_device_attach(dev);

	return 0;
}
#else
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif

static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= dmfe_remove_one,
	.suspend	= dmfe_suspend,
	.resume		= dmfe_resume
};

MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
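/*
 *	Module load entry point: apply the module parameters and
 *	register the PCI driver.
 */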
static int __init dmfe_init_module(void)
{
	int rc;

	pr_info("%s\n", version);
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	if (debug)
		dmfe_debug = debug;	/* set debug flag */
	if (cr6set)
		dmfe_cr6_user_set = cr6set;

	switch (mode) {
	case DMFE_10MHF:
	case DMFE_100MHF:
	case DMFE_10MFD:
	case DMFE_100MFD:
	case DMFE_1M_HPNA:
		dmfe_media_mode = mode;
		break;
	default:
		dmfe_media_mode = DMFE_AUTO;
		break;
	}

	/* HPNA operating mode check */
	if (HPNA_mode > 4)
		HPNA_mode = 0;		/* Default: LP/HS */
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;	/* Default: Ignore remote cmd */
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
	if (rc < 0)
		return rc;

	return 0;
}

/*
 *	Module unload entry point: unregister the PCI driver.
 */
static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);