/*
 * dmfe.c: Davicom DM910x (DM9100/DM9102/DM9132/DM9009) 10/100 PCI
 * fast ethernet driver.
 *
 * Author: Sten Wang <sten_wang@davicom.com.tw>
 * Licensed under the GNU General Public License (GPL).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif

/* Chip IDs: PCI device ID in the high 16 bits, Davicom vendor ID 0x1282 in the low 16 bits */
#define PCI_DM9132_ID	0x91321282
#define PCI_DM9102_ID	0x91021282
#define PCI_DM9100_ID	0x91001282
#define PCI_DM9009_ID	0x90091282

#define DM9102_IO_SIZE	0x80
#define DM9102A_IO_SIZE	0x100

/* Descriptor ring and buffer sizing */
#define TX_MAX_SEND_CNT	0x1		/* Max packets in flight on the wire */
#define TX_DESC_CNT	0x10		/* Allocated TX descriptors */
#define RX_DESC_CNT	0x20		/* Allocated RX descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Stop the queue at this fill level */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* Wake the queue below this level */
#define DESC_ALL_CNT	(TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC	0x600
#define RX_ALLOC_SIZE	0x620

/* Control register defaults */
#define DM910X_RESET	1
#define CR0_DEFAULT	0x00E00000
#define CR6_DEFAULT	0x00080000
#define CR7_DEFAULT	0x180c1
#define CR15_DEFAULT	0x06
#define TDES0_ERR_MASK	0x4302		/* jabber, late/excessive collision, underrun */
#define MAX_PACKET_SIZE	1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE	100		/* Copy packets shorter than this into a fresh skb */
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

/* Wake-on-LAN bits in PCI configuration register 0x40 */
#define DMFE_WOL_LINKCHANGE	0x20000000
#define DMFE_WOL_SAMPLEPACKET	0x10000000
#define DMFE_WOL_MAGICPACKET	0x08000000

/* Media / operation modes */
#define DMFE_10MHF	0
#define DMFE_100MHF	1
#define DMFE_10MFD	4
#define DMFE_100MFD	5
#define DMFE_AUTO	8
#define DMFE_1M_HPNA	0x10

/* TX FIFO threshold settings (CR6 bits) */
#define DMFE_TXTH_72	0x400000
#define DMFE_TXTH_96	0x404000
#define DMFE_TXTH_128	0x0000
#define DMFE_TXTH_256	0x4000
#define DMFE_TXTH_512	0x8000
#define DMFE_TXTH_1K	0xC000

/* Timer intervals */
#define DMFE_TIMER_WUT	(jiffies + HZ * 1)	/* Link-monitor wake-up time */
#define DMFE_TX_TIMEOUT	((3*HZ)/2)		/* TX hang detection time */
#define DMFE_TX_KICK	(HZ/2)			/* TX restart kick time */

/* Register access helpers; 'ioaddr' must be in scope at the call site */
#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

#define DMFE_DBUG(dbug_now, msg, value)			\
	do {						\
		if (dmfe_debug || (dbug_now))		\
			pr_err("%s %lx\n",		\
			       (msg), (long) (value));	\
	} while (0)

#define SHOW_MEDIA_TYPE(mode)				\
	pr_info("Change Speed to %sMbps %s duplex\n",	\
		(mode & 1) ? "100" : "10",		\
		(mode & 4) ? "full" : "half")

/* CR9 bits: SROM (serial EEPROM) and MII management interface */
#define CR9_SROM_READ	0x4800
#define CR9_SRCS	0x1
#define CR9_SRCLK	0x2
#define CR9_CRDOUT	0x8
#define SROM_DATA_0	0x0
#define SROM_DATA_1	0x4
#define PHY_DATA_1	0x20000
#define PHY_DATA_0	0x00000
#define MDCLKH		0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE	0x14

/* DM9132 and DM9102A (rev >= 0x30) use the larger I/O map */
#define __CHK_IO_SIZE(pci_id, dev_rev) \
	((((pci_id) == PCI_DM9132_ID) || ((dev_rev) >= 0x30)) ? \
		DM9102A_IO_SIZE : DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
		       (pci_dev)->revision))

#define DEVICE net_device

/* TX/RX descriptors: the first four fields are the hardware descriptor,
   followed by the driver's own bookkeeping pointers */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3;	/* data for the chip */
	char *tx_buf_ptr;			/* data for the driver */
	struct tx_desc *next_tx_desc;
} __attribute__((aligned(32)));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3;	/* data for the chip */
	struct sk_buff *rx_skb_ptr;		/* data for the driver */
	struct rx_desc *next_rx_desc;
} __attribute__((aligned(32)));

struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;

	void __iomem *ioaddr;		/* I/O base address */
	u32 cr0_data;
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* Physical (DMA) addresses */
	dma_addr_t buf_pool_dma_ptr;	/* TX buffer pool */
	dma_addr_t buf_pool_dma_start;	/* TX buffer pool start */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* Virtual addresses and ring pointers */
	unsigned char *buf_pool_ptr;	/* TX buffer pool */
	unsigned char *buf_pool_start;	/* TX buffer pool start */
	unsigned char *desc_pool_ptr;	/* descriptor pool */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* packet arrival pointer */
	unsigned long tx_packet_cnt;	/* packets in flight on the wire */
	unsigned long tx_queue_cnt;	/* packets queued on the ring */
	unsigned long rx_avail_cnt;	/* available RX descriptors */
	unsigned long interval_rx_cnt;	/* RX packets within one timer tick */

	u16 HPNA_command;		/* backup HPNA command */
	u16 HPNA_timer;			/* HPNA polling timer */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* saved PHY register 4 value */

	u8 HPNA_present;		/* 0: none, 1: DM9801, 2: DM9802 */
	u8 chip_type;			/* nonzero for DM9102A */
	u8 media_mode;			/* user-specified media mode */
	u8 op_mode;			/* real working media mode */
	u8 phy_addr;
	u8 wait_reset;			/* nonzero: a reset is pending */
	u8 dm910x_chk_mode;		/* driver-side RX CRC check mode */
	u8 first_in_callback;		/* flag for the first timer callback */
	u8 wol_mode;			/* user-selected WOL mode */
	struct timer_list timer;

	/* Driver-defined statistics counters */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* SROM contents */
	unsigned char srom[128];
};

enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};

enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variables */
static int printed_version;
static const char version[] =
	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* Module parameters, settable at load time */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;		/* default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* default: ignore remote command */
static u8 HPNA_tx_cmd;		/* default: don't issue remote command */
static u8 HPNA_NoiseFloor;	/* HPNA noise floor */
static u8 SF_mode;		/* special function: 1: VLAN, 2: RX flow control */

/* Function declarations */
static int dmfe_open(struct DEVICE *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe(struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
static u16 phy_read(void __iomem *, u8, u8, u32);
static void phy_write(void __iomem *, u8, u8, u16, u32);
static void phy_write_1bit(void __iomem *, u32);
static u16 phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *);
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network interface operations */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu_rh74	= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};
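
/*
 * dmfe_init_one - PCI probe handler.
 * Allocates the net_device and the DMA descriptor/buffer pools, maps the
 * I/O region, reads the MAC address out of the SROM and registers the
 * network interface.
 */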
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 * SPARC on-board DM910x chips should be handled by the main
	 * tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable the device */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev))) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */

	/* Set Latency Timer 80h */
	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
	   Need a PCI quirk.. */

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system and device */
	db = netdev_priv(dev);

	/* Allocate TX/RX descriptor memory */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* Map BAR 0, entire region */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ((pci_pmr == 0x10000) && (db->chip_revision == 0x31))
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* Read 64 words (128 bytes) of SROM contents */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* The MAC address is kept at SROM offset 20 */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev(dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return err;
}
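
/*
 * dmfe_remove_one - PCI remove handler.
 * Unregisters the interface and releases all DMA and I/O resources
 * acquired in dmfe_init_one().
 */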
static void dmfe_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	if (dev) {
		unregister_netdev(dev);
		pci_iounmap(db->pdev, db->ioaddr);
		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
				    DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
				    db->desc_pool_dma_ptr);
		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
				    db->buf_pool_ptr, db->buf_pool_dma_ptr);
		pci_release_regions(pdev);
		free_netdev(dev);

		pci_set_drvdata(pdev, NULL);
	}

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}
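
/*
 * dmfe_open - open network interface.
 * Requests the IRQ, chooses the TX threshold / RX CRC check mode based
 * on the chip variant, initializes the hardware and starts the
 * link-monitor timer.
 */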
static int dmfe_open(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* System variables init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capabilities */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and DM9102A (rev >= 0x30) use
	   a 256-byte TX threshold; older chips fall back to store-and-forward
	   with driver-side RX CRC checking */
	if (!chkmode || (db->chip_id == PCI_DM9132_ID) ||
	    (db->chip_revision >= 0x30)) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode = 4;	/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Activate the system interface */
	netif_wake_queue(dev);

	/* Set and activate the timer process; first run after ~3 seconds */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
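
/*
 * dmfe_init_dm910x - initialize the DM910X board.
 * Resets the chip, resets the PHY through the GPIO port, parses the
 * SROM, builds the descriptor rings, programs the address filter and
 * finally starts the receiver and transmitter.
 */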
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* PHY address: DM9102(A)/DM9132/DM9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM: get board settings */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* Reset the PHY through the GPIO port (DCR12); the DM9009 needs a
	   longer reset pulse */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);		/* Delay 300 ms */
	}
	dw32(DCR12, 0x0);		/* Clear RESET signal */

	/* Process the PHY transceiver unless running HomePNA (DMFE_1M_HPNA) */
	if (!(db->media_mode & 0x10))
		dmfe_set_phyxcer(db);

	/* Media mode is fixed by the user */
	if (!(db->media_mode & DMFE_AUTO))
		db->op_mode = db->media_mode;

	/* Initialize the TX/RX descriptor rings and CR3/CR4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send the set-up frame (or fill the DM9132 ID table) */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bits */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, TX jabber and RX watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X TX/RX functions */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
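
/*
 * dmfe_start_xmit - hard transmit routine.
 * The skb is copied into a preallocated ring buffer, so it is freed
 * before this function returns; the chip is kicked only while fewer
 * than TX_MAX_SEND_CNT packets are on the wire.
 */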
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
				   struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too-large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Resource flag check */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No TX resource: should not happen, the queue is stopped above */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupts */
	dw32(DCR7, 0);

	/* Transmit this packet */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to the next free transmit descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit packet process */
	if ((!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT)) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
	}

	/* TX resource available */
	if (db->tx_queue_cnt < TX_FREE_DESC_CNT)
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupts */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	/* free this SKB */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
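
/*
 * dmfe_stop - stop network interface.
 * Stops the queue and timer, resets the chip and PHY, and frees the
 * IRQ and receive buffers.
 */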
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete the timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);	/* PHY reset */

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated RX buffers */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counters */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}
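
/*
 * dmfe_interrupt - DM910X hardware interrupt handler.
 * Acknowledges CR5, handles system bus errors by scheduling a dynamic
 * reset, then services receive, RX-ring refill and transmit completion.
 */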
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Get DM910X status and acknowledge it */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	if (!(db->cr5_data & 0xc1)) {
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupts */
	dw32(DCR7, 0);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happened */
		DMFE_DBUG(1, "System bus error. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Receive the incoming packets */
	if ((db->cr5_data & 0x40) && db->rx_avail_cnt)
		dmfe_rx_packet(dev, db);

	/* reallocate RX descriptor buffers */
	if (db->rx_avail_cnt < RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptors */
	if (db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode check */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable the interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while the
 * interrupt routine is executing.
 */
static void poll_dmfe(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	dmfe_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
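
/*
 * dmfe_free_tx_pkt - free the transmitted packets.
 * Walks completed TX descriptors, gathers error statistics, launches
 * the next queued packet when the wire is idle and wakes the queue.
 */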
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info *db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)
			break;		/* still owned by the chip */

		/* A packet completed transmission */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistics counters */
		if (tdes0 != 0x7fffffff) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* TX FIFO underrun */
					db->tx_fifo_underrun++;
					if (!(db->cr6_data & CR6_SFT)) {
						/* fall back to store & forward mode */
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}

	/* Update the TX remove pointer */
	db->tx_remove_ptr = txptr;

	/* Send the next TX packet in the queue */
	if ((db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* saved time stamp */
	}

	/* Resource-available check */
	if (db->tx_queue_cnt < TX_WAKE_DESC_CNT)
		netif_wake_queue(dev);	/* let the upper layer send again */
}
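
/*
 * cal_CRC - calculate an Ethernet CRC-32 over the given buffer.
 * 'flag' selects the complemented result, which is what the driver-side
 * RX CRC check compares against.
 */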
static inline u32 cal_CRC(unsigned char *Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);

	if (flag)
		crc = ~crc;
	return crc;
}
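
/*
 * dmfe_rx_packet - receive the incoming packets.
 * Walks the RX ring; damaged or multi-descriptor frames are dropped and
 * their buffers re-armed, short frames (< RX_COPY_SIZE) are copied into
 * a fresh skb so the large ring buffer can be reused.
 */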
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* still owned by the chip */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ((rdes0 & 0x300) != 0x300) {
			/* Frame without both First and Last descriptor flags:
			   something is wrong, reuse this buffer */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A frame with First and Last flags set */
			rxlen = ((rdes0 >> 16) & 0x3fff) - 4;	/* skip CRC */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if (!(rdes0 & 0x8000) ||
			    ((db->cr6_data & CR6_PM) && (rxlen > 6))) {
				skb = rxptr->rx_skb_ptr;

				/* Driver-side received-packet CRC check */
				if ((db->dm910x_chk_mode & 1) &&
				    (cal_CRC(skb->data, rxlen, 1) !=
				     (*(u32 *) (skb->data + rxlen)))) {
					/* CRC mismatch: drop and reuse the buffer */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet: copy short ones into a
					   small skb so the ring buffer can be
					   reused */
					if ((rxlen < RX_COPY_SIZE) &&
					    ((newskb = netdev_alloc_skb(dev, rxlen + 2))
					     != NULL)) {
						skb = newskb;
						/* 16-byte align the IP header */
						skb_reserve(skb, 2);
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							skb_put(skb, rxlen),
							rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse the SKB buffer when the packet is bad */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
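
/*
 * dmfe_set_filter_mode - set the DM910X address filter.
 * Chooses promiscuous, all-multicast or a hash/setup-frame based filter
 * depending on the interface flags and the multicast list size.
 */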
static void dmfe_set_filter_mode(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}

/* Ethtool interface */
static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	/* Only link-change and magic-packet wake-up are supported */
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	db->wol_mode = wolinfo->wolopts;
	return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
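
/*
 * dmfe_timer - one-second maintenance timer.
 * Detects stuck-receiver and TX-timeout conditions (scheduling a
 * dynamic reset when needed), monitors link state from both the chip
 * and the PHY, and restarts autonegotiation after link loss.
 */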
static void dmfe_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* On the first tick after open: restart PHY autonegotiation on
	   DM9102A-class chips */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id == PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}

	/* Operating mode check: leave the RX CRC check mode after enough
	   packets have been seen */
	if ((db->dm910x_chk_mode & 0x1) &&
	    (dev->stats.rx_packets > MAX_CHECK_PACKET))
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset check: CR8 activity with no packets received means
	   the receiver is stuck */
	tmp_cr8 = dr32(DCR8);
	if ((db->interval_rx_cnt == 0) && (tmp_cr8)) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if (db->tx_packet_cnt &&
	    time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK)) {
		dw32(DCR1, 0x1);	/* TX polling again */

		/* TX timeout */
		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT)) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			dev_warn(&dev->dev, "Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check and link-change handling */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if (((db->chip_id == PCI_DM9102_ID) &&
	     (db->chip_revision == 0x30)) ||
	    ((db->chip_id == PCI_DM9132_ID) &&
	     (db->chip_revision == 0x10))) {
		/* DM9102A chip */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	} else
		/* 0x43 is used instead of 0x3 because bit 6 should represent
		   the link status of the external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If the chip reports link failure it could be because the external
	   PHY link status pin is not connected correctly to the chip.
	   To be sure, ask the PHY too. */

	/* need a dummy read because of the PHY's register latch */
	phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (phy_read(db->ioaddr,
				db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG(0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if (!link_ok && netif_carrier_ok(dev)) {
		/* Link failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For forced 10/100M half/full modes: re-enable autonegotiation;
		   AUTO or forced 1M HomePNA modes don't need it */
		if (!(db->media_mode & 0x38))
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode: if the internal PHY link failed, select the
		   external device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M HomePNA */
			db->cr6_data |= 0x00040000;	/* bit18=1, MII */
			db->cr6_data &= ~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link OK", tmp_cr12);

		/* Auto-sense speed */
		if (!(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Re-arm the timer */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
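
/*
 * dmfe_dynamic_reset - dynamically reset the DM910X board.
 * Stops RX/TX and the upper layer, discards queued buffers, then
 * re-initializes the hardware as in open(). Called when a fatal bus
 * error, a stuck receiver or a TX timeout is detected.
 */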
static void dmfe_dynamic_reset(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop the MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* Disable interrupts */
	dw32(DCR5, dr32(DCR5));			/* Clear status */

	/* Disable the upper-layer interface */
	netif_stop_queue(dev);

	/* Free allocated RX buffers */
	dmfe_free_rxbuffer(db);

	/* System variables re-init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize the DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart the upper-layer interface */
	netif_wake_queue(dev);
}

/* Free all socket buffers queued on the RX ring */
static void dmfe_free_rxbuffer(struct dmfe_board_info *db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}

/* Reuse the SK buffer: re-arm an RX descriptor with an existing skb */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff *skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
				skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* give it back to the chip */
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
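
/*
 * dmfe_descriptor_init - initialize the transmit/receive descriptor rings.
 * Lays the TX ring and then the RX ring out in the shared DMA pool,
 * chains them circularly, points the chip at them (CR3/CR4) and fills
 * the RX ring with fresh socket buffers.
 */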
static void dmfe_descriptor_init(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* TX descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */

	/* RX descriptor start pointer: the RX ring follows the TX ring in
	   the same pool */
	db->first_rx_desc = (void *)db->first_tx_desc +
		sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma = db->first_tx_desc_dma +
		sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */

	/* Init the transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* interrupt on completion, chained */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the ring: last TX descriptor points back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init the receive descriptor chain */
	tmp_rx_dma = db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);	/* chained, 0x600-byte buffer */
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the ring: last RX descriptor points back to the first */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* Pre-allocate RX buffers */
	allocate_rx_buffer(dev);
}
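
/*
 * update_cr6 - update the CR6 value.
 * The value is first written with the RX/TX start bits (0x2002) masked
 * so the engines stop cleanly, then written in full.
 */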
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	u32 cr6_tmp;

	cr6_tmp = cr6_data & ~0x2002;	/* stop Tx/Rx */
	dw32(DCR6, cr6_tmp);
	udelay(5);
	dw32(DCR6, cr6_data);
	udelay(5);
}

/*
 * dm9132_id_table - set up the DM9132 unicast address and multicast
 * hash table, which live in chip registers starting at offset 0xc0.
 */
static void dm9132_id_table(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	/* Node address */
	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);
		ioaddr += 4;
	}

	/* Clear the hash table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast addresses in the 64-bit hash table */
	netdev_for_each_mc_addr(ha, dev) {
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to the MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
}
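
/*
 * send_filter_frame - send a setup frame for the DM9102/DM9102A.
 * Non-DM9132 chips take their address filter as a special frame queued
 * on the TX ring: the node address, the broadcast address, then up to
 * 14 multicast addresses, with unused slots padded with broadcast.
 */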
static void send_filter_frame(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 *addrptr;
	u32 *suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fill in the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* pad remaining slots with the broadcast address */
	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource check, then send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* Resource empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		netif_trans_update(dev);
	} else
		db->tx_queue_cnt++;	/* Put it in the TX queue */
}

/*
 * allocate_rx_buffer - refill the RX ring.
 * Allocates one socket buffer per free RX descriptor and hands the
 * descriptors back to the chip.
 */
static void allocate_rx_buffer(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while (db->rx_avail_cnt < RX_DESC_CNT) {
		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
		if (skb == NULL)
			break;
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, skb->data,
				RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}
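
/*
 * srom_clk_write - clock one data bit into the SROM (serial EEPROM)
 * interface through CR9: chip select, clock high, clock low.
 */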
static void srom_clk_write(void __iomem *ioaddr, u32 data)
{
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
		udelay(5);
	}
}

/* read_srom_word - read one 16-bit word from the SROM */
static u16 read_srom_word(void __iomem *ioaddr, int offset)
{
	u16 srom_data;
	int i;

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Send the read command (110b) */
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_0);

	/* Send the 6-bit offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(ioaddr, srom_data);
	}

	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Clock in 16 data bits */
	for (i = 16; i > 0; i--) {
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	return srom_data;
}
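
/*
 * dmfe_sense_speed - auto-sense the media speed.
 * Reads the PHY autonegotiation result into db->op_mode; returns
 * nonzero if the link is down or the reported mode is unrecognized.
 */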
static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	/* BMSR bits are latched; read twice for the current status */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ((phy_mode & 0x24) == 0x24) {	/* autoneg complete, link up */
		if (db->chip_id == PCI_DM9132_ID)
			phy_mode = phy_read(db->ioaddr,
					    db->phy_addr, 7, db->chip_id) & 0xf000;
		else	/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
					    db->phy_addr, 17, db->chip_id) & 0xf000;
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF;  break;
		case 0x2000: db->op_mode = DMFE_10MFD;  break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default:
			db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
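
/*
 * dmfe_set_phyxcer - set the PHY transceiver working mode.
 * Programs the advertised abilities (MII register 4) from the requested
 * media mode, falling back to autonegotiation when no capability bit is
 * set, then restarts autonegotiation.
 */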
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_reg;

	/* Select the 10/100M transceiver */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 chip: transceiver reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;

		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Transceiver capability setting */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO mode */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force mode */
		switch (db->media_mode) {
		case DMFE_10MHF:  phy_reg |= 0x20;  break;
		case DMFE_10MFD:  phy_reg |= 0x40;  break;
		case DMFE_100MHF: phy_reg |= 0x80;  break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID)
			phy_reg &= 0x61;	/* DM9009 supports 10Mbps only */
	}

	/* Write the new capability to transceiver reg4; if nothing is
	   advertised, fall back to autonegotiation */
	if (!(phy_reg & 0x01e0)) {
		phy_reg |= db->PHY_reg4;
		db->media_mode |= DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart autonegotiation */
	if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if (!db->chip_type)
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
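
/*
 * dmfe_process_mode - apply the resolved operating mode.
 * Sets the CR6 full-duplex and HomePNA bits, and forces the PHY's BMCR
 * when the link partner cannot autonegotiate.
 */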
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full-duplex mode check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* set full-duplex bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* clear full-duplex bit */

	/* Transceiver selection */
	if (db->op_mode & 0x10)			/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* external MII select */
	else
		db->cr6_data &= ~0x40000;	/* internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M transceiver force mode */
	if (!(db->media_mode & 0x18)) {
		/* Force mode: check the link partner's autoneg ability */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if (!(phy_reg & 0x1)) {
			/* partner without N-Way capability */
			phy_reg = 0x0;
			switch (db->op_mode) {
			case DMFE_10MHF:  phy_reg = 0x0;    break;
			case DMFE_10MFD:  phy_reg = 0x100;  break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
			if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
				mdelay(20);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
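
/*
 * phy_write - write a word to a PHY register.
 * The DM9132 maps PHY registers into I/O space; other chips bit-bang
 * the MII management frame (preamble, start, write opcode, PHY and
 * register addresses, turnaround, 16 data bits) through CR9.
 */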
static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
{
	u16 i;

	if (chip_id == PCI_DM9132_ID) {
		dw16(0x80 + offset * 4, phy_data);
	} else {
		/* DM9102/DM9102A chip */

		/* Send preamble ('1' bits) to the PHY controller */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command (01) */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send PHY address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Turnaround */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write the 16-bit data word */
		for (i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
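
/*
 * phy_read - read a word from a PHY register, mirroring phy_write():
 * memory-mapped on the DM9132, bit-banged MII elsewhere.
 */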
static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 chip */
		phy_data = dr16(0x80 + offset * 4);
	} else {
		/* DM9102/DM9102A chip */

		/* Send preamble ('1' bits) to the PHY controller */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command (10) */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send PHY address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip the turnaround bit */
		phy_read_1bit(ioaddr);

		/* Read the 16-bit data word */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}

/* Write one bit of data to the MII management interface via CR9 */
static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
	dw32(DCR9, phy_data);		/* MII clock low */
	udelay(1);
	dw32(DCR9, phy_data | MDCLKH);	/* MII clock high */
	udelay(1);
	dw32(DCR9, phy_data);		/* MII clock low */
	udelay(1);
}

/* Read one bit of data from the MII management interface via CR9 */
static u16 phy_read_1bit(void __iomem *ioaddr)
{
	u16 phy_data;

	dw32(DCR9, 0x50000);
	udelay(1);
	phy_data = (dr32(DCR9) >> 19) & 0x1;	/* data appears on bit 19 */
	dw32(DCR9, 0x40000);
	udelay(1);

	return phy_data;
}
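
/*
 * dmfe_parse_srom - parse the SROM contents.
 * On format 4.1 SROMs this extracts the NIC capabilities, the default
 * media mode and the CR15 special-function flags; it then probes for a
 * HomePNA PHY (DM9801 or DM9802) and programs it if present.
 */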
static void dmfe_parse_srom(struct dmfe_board_info *db)
{
	char *srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check the SROM version */
	if (((int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */

		/* Get the NIC's supported media modes */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		db->PHY_reg4 = 0;
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch (db->NIC_capability & tmp_reg) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10MHF */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10MFD */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100MHF */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100MFD */
			}
		}

		/* Forced media mode check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		switch (dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;
		case 0x2: dmfe_media_mode = DMFE_10MFD;  break;
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;	/* HomePNA */
		}

		/* Special function settings */
		/* VLAN function */
		if ((SF_mode & 0x1) || (srom[43] & 0x80))
			db->cr15_data |= 0x40;

		/* Flow control */
		if ((SF_mode & 0x2) || (srom[40] & 0x1))
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ((SF_mode & 0x4) || (srom[40] & 0xe))
			db->cr15_data |= 0x9800;
	}

	/* Parse the HPNA parameters */
	db->HPNA_command = 1;

	/* Accept remote commands or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch (HPNA_mode) {	/* issue remote command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch (HPNA_mode) {	/* don't issue remote command */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check whether a DM9801 or DM9802 is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ((tmp_reg & 0xfff0) == 0xb900) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if (phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}
}
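
/*
 * dmfe_program_DM9801 - init the DM9801 HomeRun HomePNA PHY.
 * Adjusts the noise-floor and command registers according to the PHY
 * revision reported in register 3.
 */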
static void dmfe_program_DM9801(struct dmfe_board_info *db, int HPNA_rev)
{
	uint reg17, reg25;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch (HPNA_rev) {
	case 0xb900:			/* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ((reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901:			/* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902:			/* DM9801 E5 */
	case 0xb903:			/* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}

/* Init the DM9802 LongRun HomePNA PHY: program the noise floor into reg 25 */
static void dmfe_program_DM9802(struct dmfe_board_info *db)
{
	uint phy_reg;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = (phy_reg & 0xff00) + HPNA_NoiseFloor;
	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}

/*
 * dmfe_HPNA_remote_cmd_chk - check the remote HPNA device's status.
 * Re-issues the driver's command whenever the state reported by the
 * remote device disagrees with the local setting.
 */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *db)
{
	uint phy_reg;

	/* Get remote device status */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch (phy_reg) {
	case 0x00: phy_reg = 0x0a00; break;	/* LP/LS */
	case 0x20: phy_reg = 0x0900; break;	/* LP/HS */
	case 0x40: phy_reg = 0x0600; break;	/* HP/LS */
	case 0x60: phy_reg = 0x0500; break;	/* HP/HS */
	}

	/* Check whether the remote device status matches our setting */
	if (phy_reg != (db->HPNA_command & 0x0f00)) {
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			  db->chip_id);
		db->HPNA_timer = 8;
	} else
		db->HPNA_timer = 600;	/* matched: recheck every 10 minutes */
}

static const struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);

#ifdef CONFIG_PM
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp;

	/* Disable the upper-layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable interrupts */
	dw32(DCR7, 0);
	dw32(DCR5, dr32(DCR5));

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL via PCI configuration register 0x40 */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down the device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	u32 tmp;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize the DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart the upper-layer interface */
	netif_device_attach(dev);

	return 0;
}
#else
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif

static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= dmfe_remove_one,
	.suspend	= dmfe_suspend,
	.resume		= dmfe_resume
};

MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
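
/*
 * dmfe_init_module - module entry point.
 * Applies the module parameters (clamping out-of-range HPNA settings)
 * and registers the PCI driver.
 */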
static int __init dmfe_init_module(void)
{
	int rc;

	pr_info("%s\n", version);
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	if (debug)
		dmfe_debug = debug;	/* set debug flag */
	if (cr6set)
		dmfe_cr6_user_set = cr6set;

	switch (mode) {
	case DMFE_10MHF:
	case DMFE_100MHF:
	case DMFE_10MFD:
	case DMFE_100MFD:
	case DMFE_1M_HPNA:
		dmfe_media_mode = mode;
		break;
	default:
		dmfe_media_mode = DMFE_AUTO;
		break;
	}

	/* Clamp out-of-range HPNA parameters to their defaults */
	if (HPNA_mode > 4)
		HPNA_mode = 0;
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
	if (rc < 0)
		return rc;

	return 0;
}

/* dmfe_cleanup_module - module exit point */
static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);