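/*
 * dmfe.c: Davicom DM9100/DM9102(A)/DM9132/DM9009 PCI fast ethernet driver.
 *
 * Supports the PCI ids listed in dmfe_pci_tbl below; licensed under the
 * GPL (see MODULE_LICENSE at the end of this file).
 */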
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif

/* Board/System/Debug information/definition */

/* PCI vendor/device ids of the supported chips */
#define PCI_DM9132_ID	0x91321282
#define PCI_DM9102_ID	0x91021282
#define PCI_DM9100_ID	0x91001282
#define PCI_DM9009_ID	0x90091282

/* I/O region sizes */
#define DM9102_IO_SIZE	0x80
#define DM9102A_IO_SIZE	0x100

/* Descriptor ring and buffer sizing */
#define TX_MAX_SEND_CNT	0x1	/* at most one packet in flight */
#define TX_DESC_CNT	0x10	/* 16 TX descriptors */
#define RX_DESC_CNT	0x20	/* 32 RX descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* keep last 2 free */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup threshold */
#define DESC_ALL_CNT	(TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC	0x600
#define RX_ALLOC_SIZE	0x620

/* Chip register defaults and limits */
#define DM910X_RESET	1
#define CR0_DEFAULT	0x00E00000
#define CR6_DEFAULT	0x00080000
#define CR7_DEFAULT	0x180c1
#define CR15_DEFAULT	0x06
#define TDES0_ERR_MASK	0x4302
#define MAX_PACKET_SIZE	1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE	100	/* copybreak size for small RX frames */
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

/* Wake-on-LAN bits in PCI configuration register 0x40 */
#define DMFE_WOL_LINKCHANGE	0x20000000
#define DMFE_WOL_SAMPLEPACKET	0x10000000
#define DMFE_WOL_MAGICPACKET	0x08000000

/* Media modes (bit 0: speed, bit 2: duplex, bit 4: HomePNA) */
#define DMFE_10MHF	0
#define DMFE_100MHF	1
#define DMFE_10MFD	4
#define DMFE_100MFD	5
#define DMFE_AUTO	8
#define DMFE_1M_HPNA	0x10

/* TX FIFO threshold settings for CR6 */
#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */

/* Timer intervals, in jiffies */
#define DMFE_TIMER_WUT	(jiffies + HZ * 1)	/* timer wakeup time */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)		/* tx packet time-out 1.5 s */
#define DMFE_TX_KICK	(HZ/2)			/* tx packet kick 0.5 s */

/* Register accessors */
#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

#define DMFE_DBUG(dbug_now, msg, value)			\
	do {						\
		if (dmfe_debug || (dbug_now))		\
			pr_err("%s %lx\n",		\
			       (msg), (long) (value));	\
	} while (0)
#define SHOW_MEDIA_TYPE(mode)				\
	pr_info("Change Speed to %sMbps %s duplex\n",	\
		(mode & 1) ? "100" : "10",		\
		(mode & 4) ? "full" : "half")

/* CR9 definitions: SROM and MII management interface */
#define CR9_SROM_READ	0x4800
#define CR9_SRCS	0x1
#define CR9_SRCLK	0x2
#define CR9_CRDOUT	0x8
#define SROM_DATA_0	0x0
#define SROM_DATA_1	0x4
#define PHY_DATA_1	0x20000
#define PHY_DATA_0	0x00000
#define MDCLKH		0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE	0x14

#define __CHK_IO_SIZE(pci_id, dev_rev) \
	((((pci_id) == PCI_DM9132_ID) || ((dev_rev) >= 0x30)) ? \
	DM9102A_IO_SIZE : DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
	(pci_dev)->revision))

/* Structure/enum declaration */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* chip-visible descriptor words */
	char *tx_buf_ptr;		/* kernel virtual address of the buffer */
	struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* chip-visible descriptor words */
	struct sk_buff *rx_skb_ptr;	/* skb backing this descriptor */
	struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));

struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;

	void __iomem *ioaddr;		/* I/O base address */
	u32 cr0_data;
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* DMA handles for the descriptor and buffer pools */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool, aligned */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* Kernel virtual addresses and ring cursors */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool, aligned */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* next packet-arrival descriptor */
	unsigned long tx_packet_cnt;	/* packets handed to the chip */
	unsigned long tx_queue_cnt;	/* packets waiting to be sent */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx count per timer interval */

	u16 HPNA_command;		/* For HPNA remote device */
	u16 HPNA_timer;			/* For HPNA remote device */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* saved PHY register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* DM9102A chip type flag */
	u8 media_mode;			/* user-specified media mode */
	u8 op_mode;			/* real working media mode */
	u8 phy_addr;
	u8 wait_reset;			/* hardware failed, needs reset */
	u8 dm910x_chk_mode;		/* operating mode check */
	u8 first_in_callback;		/* first timer-callback flag */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;

	/* Driver-internal error counters */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
};

enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};

enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variables resolved at module load time */
static int printed_version;
static const char version[] =
	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* Module parameters (see MODULE_PARM_DESC near the end of the file) */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: ignore remote command */
static u8 HPNA_tx_cmd;		/* Default: don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA noise floor */
static u8 SF_mode;		/* Special function: 1:VLAN, 2:RX flow control */

/* function declarations */
static int dmfe_open(struct net_device *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
static int dmfe_stop(struct net_device *);
static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe(struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct net_device *);
static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *);
static void dmfe_set_phyxcer(struct dmfe_board_info *);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};

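/*
 * Probe one DM910x adapter: enable the PCI device, map its register
 * window, allocate the descriptor pool and the pre-allocated TX buffers,
 * read the MAC address out of the SROM, and register the net device.
 */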
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 * On-board DM910x chips that carry a "local-mac-address" OF
	 * property are driven by the tulip driver instead; skip them.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable the device */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev))) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0
	/* Optionally raise the PCI latency timer; left disabled. */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate the shared descriptor pool for both rings (tx_desc and
	   rx_desc have the same size) plus 0x20 bytes of alignment slack */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
		DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	/* Pre-allocated TX data buffers, one per TX descriptor */
	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
		TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* Map the register window (BAR 0) */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ((pci_pmr == 0x10000) && (db->chip_revision == 0x31))
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* read 64 words of SROM data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* Set node address from the SROM */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev(dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);

	return err;
}

/* Unregister the device and free all DMA pools and PCI resources */
static void dmfe_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	if (dev) {
		unregister_netdev(dev);
		pci_iounmap(db->pdev, db->ioaddr);
		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
				    DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
				    db->desc_pool_dma_ptr);
		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
				    db->buf_pool_ptr, db->buf_pool_dma_ptr);
		pci_release_regions(pdev);
		free_netdev(dev);	/* free board information */
	}

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}

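/*
 * Open the interface: request the IRQ, choose the CR6/CR0 operating mode
 * (TX threshold vs. store-and-forward depending on chip revision), bring
 * the chip up and start the one-second link maintenance timer.
 */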
static int dmfe_open(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: newer chips can use a 256-byte TX
	   threshold; older DM9102 revisions run in store-and-forward mode
	   with the RX checksum-check workaround enabled */
	if (!chkmode || (db->chip_id == PCI_DM9132_ID) ||
	    (db->chip_revision >= 0x30)) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode = 4;	/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Activate the system interface */
	netif_wake_queue(dev);

	/* set and activate the timer process */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}

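/*
 * Initialize the DM910X board: reset the MAC, parse the SROM, reset the
 * PHY, set the media mode, build the descriptor rings, upload the address
 * filter and finally enable interrupts and the TX/RX engines.
 */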
static void dmfe_init_dm910x(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* PHY address: DM9102(A)/DM9132/DM9801 use PHY address 1 */
	db->phy_addr = 1;

	/* Parse SROM: media mode, special-function flags, HPNA setup */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* Reset the PHY via GPR port bit 7 */
	dw32(DCR12, 0x180);		/* let bit 7 drive the output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* issue RESET signal */
		mdelay(300);		/* delay 300 ms */
	}
	dw32(DCR12, 0x0);		/* clear RESET signal */

	/* Process PHY media mode (unless 1M HomePNA is forced) */
	if (!(db->media_mode & 0x10))
		dmfe_set_phyxcer(db);

	/* Media mode process: forced modes are applied directly */
	if (!(db->media_mode & DMFE_AUTO))
		db->op_mode = db->media_mode;

	/* Initialize transmit/receive descriptor rings and CR3/CR4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send the address filter setup frame */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, the interrupt mask */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, TX jabber and RX watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X TX/RX functions */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}

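/*
 * Hard start transmit: data is copied into a pre-allocated TX buffer
 * (no scatter/gather), the descriptor is handed to the chip via the
 * owner bit, and TX polling is kicked through CR1.
 */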
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too-large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Resource flag check */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No TX resource check: should normally never happen */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt */
	dw32(DCR7, 0);

	/* transmit this packet */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit packet process */
	if ((!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT)) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* set owner bit */
		db->tx_packet_cnt++;			/* ready to send */
		dw32(DCR1, 0x1);			/* issue TX polling */
		netif_trans_update(dev);		/* save time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue the TX packet */
		dw32(DCR1, 0x1);			/* issue TX polling */
	}

	/* TX resource available check */
	if (db->tx_queue_cnt < TX_FREE_DESC_CNT)
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupts */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

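/*
 * Stop the interface: kill the timer, reset the chip, power down the
 * PHY and release the IRQ and all receive buffers.
 */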
static int dmfe_stop(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete timer */
	del_timer_sync(&db->timer);

	/* Reset and stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated RX buffers */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counters */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}

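/*
 * Main interrupt handler: acknowledges CR5 status, schedules a dynamic
 * reset on fatal bus errors, and otherwise services RX, RX-buffer refill
 * and TX completion before re-enabling the interrupt mask in CR7.
 */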
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);	/* clear the pending status bits */
	if (!(db->cr5_data & 0xc1)) {
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupts in CR7 to solve the interrupt edge problem */
	dw32(DCR7, 0);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* fatal system bus error: schedule a dynamic reset */
		DMFE_DBUG(1, "System bus error happened. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Received a packet */
	if ((db->cr5_data & 0x40) && db->rx_avail_cnt)
		dmfe_rx_packet(dev, db);

	/* reallocate RX descriptor buffers */
	if (db->rx_avail_cnt < RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptors */
	if (db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode check */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable the interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void poll_dmfe(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq is heavy-handed, but the handler takes no
	   device-level lock, so it is the only safe option here */
	disable_irq(irq);
	dmfe_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

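/*
 * Free the transmitted packets: walk the TX ring until an owner bit is
 * found, accumulate error statistics from TDES0, then launch the next
 * queued packet, if any, and wake the queue when descriptors free up.
 */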
static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)		/* still owned by the chip */
			break;

		/* A packet transmission completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counters */
		if (tdes0 != 0x7fffffff) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* FIFO underrun */
					db->tx_fifo_underrun++;
					if (!(db->cr6_data & CR6_SFT)) {
						/* fall back to store & forward mode */
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the next TX packet in queue */
	if ((db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* set owner bit */
		db->tx_packet_cnt++;			/* ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* issue TX polling */
		netif_trans_update(dev);		/* save time stamp */
	}

	/* Resource available check */
	if (db->tx_queue_cnt < TX_WAKE_DESC_CNT)
		netif_wake_queue(dev);	/* wake the upper layer to send again */
}

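/*
 * CRC32 over the buffer, used both for the received-frame checksum
 * re-check and for the multicast hash; 'flag' selects the bit-inverted
 * (Ethernet FCS) form of the result.
 */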
static inline u32 cal_CRC(unsigned char *Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);

	if (flag)
		crc = ~crc;
	return crc;
}

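/*
 * Receive path: harvest completed RX descriptors, count error frames in
 * the statistics, copy small frames into fresh skbs (copybreak at
 * RX_COPY_SIZE bytes) and push good packets up with netif_rx().
 */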
static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* descriptor still owned by the chip */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;	/* statistic for the RX-hang check */

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ((rdes0 & 0x300) != 0x300) {
			/* Frame without both First/Last flags: reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A complete frame (First and Last flags set) */
			rxlen = ((rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if (!(rdes0 & 0x8000) ||
			    ((db->cr6_data & CR6_PM) && (rxlen > 6))) {
				skb = rxptr->rx_skb_ptr;

				/* Received-frame checksum re-check
				   (DM910x check mode workaround) */
				if ((db->dm910x_chk_mode & 1) &&
				    (cal_CRC(skb->data, rxlen, 1) !=
				     (*(u32 *) (skb->data + rxlen)))) {
					/* found an erroneous received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet: small frames are copied
					   into a fresh skb so the big RX buffer
					   can be reused immediately */
					if ((rxlen < RX_COPY_SIZE) &&
					    ((newskb = netdev_alloc_skb(dev, rxlen + 2))
					     != NULL)) {

						skb = newskb;
						/* align the IP header */
						skb_reserve(skb, 2);
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
							  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse the SKB buffer when the packet is in error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}

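/*
 * Set the RX filter: promiscuous, all-multicast (also when more than
 * DMFE_MAX_MULTICAST addresses are configured), or a perfect/hash filter
 * uploaded to the chip.
 */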
static void dmfe_set_filter_mode(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast addresses", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* the DM9132 uses a hash table */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A use a setup frame */
	spin_unlock_irqrestore(&db->lock, flags);
}

static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	db->wol_mode = wolinfo->wolopts;
	return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};

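/*
 * One-second timer routine: performs the DM9102 first-tick PHY workaround,
 * detects RX hangs via CR8, kicks or times out stuck transmits, runs the
 * dynamic reset when requested and tracks link state from CR12 and the PHY.
 */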
static void dmfe_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* DM9102 first-callback workaround: restart auto-negotiation once */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id == PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}

	/* Operating mode check: leave check mode after enough packets */
	if ((db->dm910x_chk_mode & 0x1) &&
	    (dev->stats.rx_packets > MAX_CHECK_PACKET))
		db->dm910x_chk_mode = 0x4;

	/* RX-hang detection: missed frames counted but nothing received */
	tmp_cr8 = dr32(DCR8);
	if ((db->interval_rx_cnt == 0) && (tmp_cr8)) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if (db->tx_packet_cnt &&
	    time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK)) {
		dw32(DCR1, 0x1);	/* TX polling again */

		/* TX timeout */
		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT)) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			dev_warn(&dev->dev, "Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if (((db->chip_id == PCI_DM9102_ID) &&
	     (db->chip_revision == 0x30)) ||
	    ((db->chip_id == PCI_DM9132_ID) &&
	     (db->chip_revision == 0x10))) {
		/* DM9102A chip */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	} else
		/* 0x43 is used instead of 0x3 because bit 6 is also a
		   link status indication on these revisions */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If the chip reports a failed link it could be because the external
	   PHY link status pin is not connected correctly; ask the PHY too.
	   The BMSR is read twice because the link bit is latched. */
	dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (dmfe_phy_read(db->ioaddr,
				     db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG(0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if (!link_ok && netif_carrier_ok(dev)) {
		/* Link failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For forced 10/100M half/full mode: re-enable auto-negotiation */
		if (!(db->media_mode & 0x38))
			dmfe_phy_write(db->ioaddr, db->phy_addr,
				       0, 0x1000, db->chip_id);

		/* AUTO mode only: 10/100M link failed, try 1M HomePNA */
		if (db->media_mode & DMFE_AUTO) {
			db->cr6_data |= 0x00040000;	/* bit18=1, MII */
			db->cr6_data &= ~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link OK", tmp_cr12);

		/* Auto-sense speed */
		if (!(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}

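/*
 * Dynamic reset: stop the TX/RX engines, drop all buffers and counters,
 * then re-run the full dmfe_init_dm910x() bring-up. Used on fatal bus
 * errors, TX timeouts and RX hangs detected by the timer routine.
 */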
static void dmfe_dynamic_reset(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* disable TX/RX */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* disable interrupts */
	dw32(DCR5, dr32(DCR5));			/* clear pending status */

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free allocated RX buffers */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}

/* Free all skbs still attached to RX descriptors */
static void dmfe_free_rxbuffer(struct dmfe_board_info *db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}

/* Re-arm an RX descriptor with an already-allocated skb */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff *skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
			skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();	/* mapping must be visible before setting the owner bit */
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}

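/*
 * Build the TX/RX descriptor rings inside the DMA-coherent pool: the TX
 * ring (with its pre-allocated buffers) comes first, the RX ring follows,
 * both chained circularly, and the ring bases are written to CR4/CR3.
 */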
static void dmfe_descriptor_init(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);	/* TX descriptor base address */

	/* rx descriptor start pointer */
	db->first_rx_desc = (void *)db->first_tx_desc +
		sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma = db->first_tx_desc_dma +
		sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);	/* RX descriptor base address */

	/* Init the transmit descriptor chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* close the TX ring */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init the receive descriptor chain */
	tmp_rx_dma = db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* close the RX ring */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate RX buffers */
	allocate_rx_buffer(dev);
}

/*
 * Update CR6 value: the TX/RX engines must be stopped before the CR6
 * operation modes can be changed, hence the two-step write.
 */
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	u32 cr6_tmp;

	cr6_tmp = cr6_data & ~0x2002;	/* stop TX/RX first */
	dw32(DCR6, cr6_tmp);
	udelay(5);
	dw32(DCR6, cr6_data);
	udelay(5);
}

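/*
 * Program the DM9132 address filter: the MAC address goes into the CAM
 * registers and multicast addresses into a 64-bit hash table, with the
 * broadcast bit always set.
 */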
static void dm9132_id_table(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	/* Node address */
	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);
		ioaddr += 4;
	}

	/* Clear hash table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast addresses in the 64-bit hash table */
	netdev_for_each_mc_addr(ha, dev) {
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to the MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
}

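/*
 * DM9102(A) address filter: build a tulip-style setup frame holding the
 * node address, the broadcast address and up to 14 multicast addresses
 * (unused slots are padded with broadcast), then queue it for transmit.
 */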
static void send_filter_frame(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 *addrptr;
	u32 *suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fill in the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* pad the remaining slots with the broadcast address */
	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource check, then send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* Resource empty: transmit immediately */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* issue TX polling */
		update_cr6(db->cr6_data, ioaddr);
		netif_trans_update(dev);
	} else
		db->tx_queue_cnt++;	/* put in the TX queue */
}

/* Allocate RX skbs until every RX descriptor is armed */
static void allocate_rx_buffer(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while (db->rx_avail_cnt < RX_DESC_CNT) {
		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
		if (skb == NULL)
			break;
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, skb->data,
			RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE));
		wmb();	/* mapping must be visible before setting the owner bit */
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}

/* Clock one SROM data bit out on CR9 (CS asserted, one SRCLK pulse) */
static void srom_clk_write(void __iomem *ioaddr, u32 data)
{
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
		udelay(5);
	}
}

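/*
 * Read one 16-bit word from the serial SROM (a 93C46-style EEPROM, judging
 * by the protocol): issue the read opcode, clock out the 6 address bits
 * MSB-first, then clock in 16 data bits.
 */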
static u16 read_srom_word(void __iomem *ioaddr, int offset)
{
	u16 srom_data;
	int i;

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);	/* chip select */
	udelay(5);

	/* Send the read command (110b) */
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_0);

	/* Send the offset: 6 address bits, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(ioaddr, srom_data);
	}

	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Clock in the 16 data bits */
	for (i = 16; i > 0; i--) {
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
			((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	dw32(DCR9, CR9_SROM_READ);	/* deselect the SROM */
	udelay(5);
	return srom_data;
}

/* Auto-sense the media mode; returns nonzero on failure */
static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	/* read the BMSR twice: the link-status bit is latched */
	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ((phy_mode & 0x24) == 0x24) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 7, db->chip_id) & 0xf000;
		else					/* DM9102/DM9102A */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 17, db->chip_id) & 0xf000;
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default:
			db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}

/*
 * Set the 10/100 PHY capability: either advertise everything the NIC
 * supports (AUTO mode) or force one mode, then restart auto-negotiation.
 */
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_reg;

	/* Select the 10/100M PHY */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 chip: PHY reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = dmfe_phy_read(db->ioaddr,
					db->phy_addr, 18, db->chip_id) & ~0x1000;

		dmfe_phy_write(db->ioaddr,
			       db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* PHY capability setting in register 4 */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO mode: advertise the saved capability set */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force mode: advertise one capability only */
		switch (db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID)
			phy_reg &= 0x61;	/* DM9009 is 10M only */
	}

	/* If nothing ended up advertised, fall back to AUTO mode */
	if (!(phy_reg & 0x01e0)) {
		phy_reg |= db->PHY_reg4;
		db->media_mode |= DMFE_AUTO;
	}
	dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart auto-negotiation */
	if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if (!db->chip_type)
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}

/*
 * Apply the sensed operating mode: program duplex and transceiver
 * selection into CR6 and, in forced modes against a partner without
 * N-Way capability, force the PHY control register as well.
 */
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full duplex mode check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* set full duplex bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* clear full duplex bit */

	/* Transceiver selection */
	if (db->op_mode & 0x10)			/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* external MII select */
	else
		db->cr6_data &= ~0x40000;	/* internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M forced mode handling */
	if (!(db->media_mode & 0x18)) {
		/* Forced mode */
		phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if (!(phy_reg & 0x1)) {
			/* link partner without N-Way capability */
			phy_reg = 0x0;
			switch (db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
			if (db->chip_type && (db->chip_id == PCI_DM9102_ID))
				mdelay(20);
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}

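/*
 * MII management helpers. The DM9132 exposes PHY registers directly in
 * its I/O space; the other chips bit-bang the MII frame through CR9:
 * preamble ones, start (01), opcode, PHY address, register address,
 * turnaround, then 16 data bits.
 */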
static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
			   u16 phy_data, u32 chip_id)
{
	u16 i;

	if (chip_id == PCI_DM9132_ID) {
		dw16(0x80 + offset * 4, phy_data);	/* direct register access */
	} else {
		/* DM9102/DM9102A chip: bit-banged MII frame */

		/* Send preamble (at least 32 one bits) to the PHY */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the PHY */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command (01) to the PHY */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send PHY address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* turnaround (10) */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a data word to the PHY controller */
		for (i = 0x8000; i > 0; i >>= 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}

static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 chip: direct register access */
		phy_data = dr16(0x80 + offset * 4);
	} else {
		/* DM9102/DM9102A chip: bit-banged MII frame */

		/* Send preamble (at least 32 one bits) to the PHY */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the PHY */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command (10) to the PHY */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send PHY address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip the turnaround state */
		dmfe_phy_read_1bit(ioaddr);

		/* read a 16-bit data word */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= dmfe_phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}

/* Write one bit to the MII management interface */
static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
	dw32(DCR9, phy_data);		/* MII clock low */
	udelay(1);
	dw32(DCR9, phy_data | MDCLKH);	/* MII clock high */
	udelay(1);
	dw32(DCR9, phy_data);		/* MII clock low */
	udelay(1);
}

/* Read one bit from the MII management interface */
static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
{
	u16 phy_data;

	dw32(DCR9, 0x50000);
	udelay(1);
	phy_data = (dr32(DCR9) >> 19) & 0x1;
	dw32(DCR9, 0x40000);
	udelay(1);

	return phy_data;
}

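/*
 * Parse the SROM: for V4.1-format SROMs take the NIC capability word,
 * default media mode and special-function flags from it; then build the
 * HPNA command word and detect an attached DM9801/DM9802 HomePNA PHY.
 */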
static void dmfe_parse_srom(struct dmfe_board_info *db)
{
	char *srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM version */
	if (((int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */

		/* Get NIC supported media modes */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		db->PHY_reg4 = 0;
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch (db->NIC_capability & tmp_reg) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;	/* 10MHF */
			case 0x2: db->PHY_reg4 |= 0x0040; break;	/* 10MFD */
			case 0x4: db->PHY_reg4 |= 0x0080; break;	/* 100MHF */
			case 0x8: db->PHY_reg4 |= 0x0100; break;	/* 100MFD */
			}
		}

		/* Forced media mode check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		switch (dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break; /* HomePNA */
		}

		/* Special function settings */
		/* VLAN function */
		if ((SF_mode & 0x1) || (srom[43] & 0x80))
			db->cr15_data |= 0x40;

		/* Flow control */
		if ((SF_mode & 0x2) || (srom[40] & 0x1))
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ((SF_mode & 0x4) || (srom[40] & 0xe))
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameters */
	db->HPNA_command = 1;

	/* Accept remote command as Ethernet node */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch (HPNA_mode) {	/* issue remote command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch (HPNA_mode) {	/* don't issue remote command */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check whether a DM9801 or DM9802 is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ((tmp_reg & 0xfff0) == 0xb900) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if (dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}
}

/* Init the HomeRun DM9801: program the noise floor per chip revision */
static void dmfe_program_DM9801(struct dmfe_board_info *db, int HPNA_rev)
{
	uint reg17, reg25;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch (HPNA_rev) {
	case 0xb900:	/* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ((reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901:	/* DM9801 E4 */
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902:	/* DM9801 E5 */
	case 0xb903:	/* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}

/* Init the LongRun DM9802: program the command word and noise floor */
static void dmfe_program_DM9802(struct dmfe_board_info *db)
{
	uint phy_reg;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = (phy_reg & 0xff00) + HPNA_NoiseFloor;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}

/*
 * Check the remote HPNA device's status and re-issue our command word
 * if it does not match; re-check quickly on mismatch, slowly on match.
 */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *db)
{
	uint phy_reg;

	/* Get the remote device status */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch (phy_reg) {
	case 0x00: phy_reg = 0x0a00; break;	/* LP/LS */
	case 0x20: phy_reg = 0x0900; break;	/* LP/HS */
	case 0x40: phy_reg = 0x0600; break;	/* HP/LS */
	case 0x60: phy_reg = 0x0500; break;	/* HP/HS */
	}

	/* Check whether the remote device status matches our setting */
	if (phy_reg != (db->HPNA_command & 0x0f00)) {
		dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			       db->chip_id);
		db->HPNA_timer = 8;
	} else
		db->HPNA_timer = 600;	/* on match, re-check in 10 minutes */
}

static const struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);

#ifdef CONFIG_PM
/*
 * Suspend: detach the interface, stop DMA, arm Wake-on-LAN via PCI
 * configuration register 0x40 and power the device down.
 */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable TX/RX */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable interrupts */
	dw32(DCR7, 0);
	dw32(DCR5, dr32(DCR5));

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down the device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

/* Resume: restore PCI state, re-init the chip and disable WOL again */
static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	u32 tmp;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart upper layer interface */
	netif_device_attach(dev);

	return 0;
}
#else
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif

static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= dmfe_remove_one,
	.suspend	= dmfe_suspend,
	.resume		= dmfe_resume
};

MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		 "Bit 0: 10/100Mbps, bit 2: duplex, bit 4: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		 "(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");

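/*
 * Module init: copy the module parameters into the driver's globals,
 * clamp out-of-range HPNA values and register the PCI driver.
 *
 * Example load with explicit (hypothetical) parameter values:
 *	modprobe dmfe mode=8 debug=1
 * mode=8 selects autonegotiation (DMFE_AUTO); see the mode defines above.
 */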
static int __init dmfe_init_module(void)
{
	int rc;

	pr_info("%s\n", version);
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	if (debug)
		dmfe_debug = debug;	/* set the debug flag */
	if (cr6set)
		dmfe_cr6_user_set = cr6set;

	switch (mode) {
	case DMFE_10MHF:
	case DMFE_100MHF:
	case DMFE_10MFD:
	case DMFE_100MFD:
	case DMFE_1M_HPNA:
		dmfe_media_mode = mode;
		break;
	default:
		dmfe_media_mode = DMFE_AUTO;
		break;
	}

	/* clamp out-of-range HPNA parameters to their defaults */
	if (HPNA_mode > 4)
		HPNA_mode = 0;
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
	if (rc < 0)
		return rc;

	return 0;
}

/* Module unload: unregister the PCI driver */
static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);