/*
 * ULi M5261/M5263 (uli526x) 10/100 Mbps fast ethernet PCI driver.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME "uli526x"

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>

#define uw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define ur32(reg)	ioread32(ioaddr + (reg))

#define PCI_ULI5261_ID 0x526110B9
#define PCI_ULI5263_ID 0x526310B9

#define ULI526X_IO_SIZE 0x100
#define TX_DESC_CNT 0x20
#define RX_DESC_CNT 0x30
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)
#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC 0x600
#define RX_ALLOC_SIZE 0x620
#define ULI526X_RESET 1
#define CR0_DEFAULT 0
#define CR6_DEFAULT 0x22200000
#define CR7_DEFAULT 0x180c1
#define CR15_DEFAULT 0x06
#define TDES0_ERR_MASK 0x4302
#define MAX_PACKET_SIZE 1514
#define ULI5261_MAX_MULTICAST 14
#define RX_COPY_SIZE 100
#define MAX_CHECK_PACKET 0x8000

#define ULI526X_10MHF 0
#define ULI526X_100MHF 1
#define ULI526X_10MFD 4
#define ULI526X_100MFD 5
#define ULI526X_AUTO 8

#define ULI526X_TXTH_72 0x400000
#define ULI526X_TXTH_96 0x404000
#define ULI526X_TXTH_128 0x0000
#define ULI526X_TXTH_256 0x4000
#define ULI526X_TXTH_512 0x8000
#define ULI526X_TXTH_1K 0xC000

#define ULI526X_TIMER_WUT (jiffies + HZ * 1)
#define ULI526X_TX_TIMEOUT ((16*HZ)/2)
#define ULI526X_TX_KICK (4*HZ/2)

#define ULI526X_DBUG(dbug_now, msg, value) \
do { \
	if (uli526x_debug || (dbug_now)) \
		pr_err("%s %lx\n", (msg), (long) (value)); \
} while (0)

#define SHOW_MEDIA_TYPE(mode) \
	pr_err("Change Speed to %sMhz %s duplex\n", \
	       mode & 1 ? "100" : "10", \
	       mode & 4 ? "full" : "half");

#define CR9_SROM_READ 0x4800
#define CR9_SRCS 0x1
#define CR9_SRCLK 0x2
#define CR9_CRDOUT 0x8
#define SROM_DATA_0 0x0
#define SROM_DATA_1 0x4
#define PHY_DATA_1 0x20000
#define PHY_DATA_0 0x00000
#define MDCLKH 0x10000

#define PHY_POWER_DOWN 0x800

#define SROM_V41_CODE 0x14

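/* Tx/Rx descriptors shared with the controller; kept 32-byte aligned. */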
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3;
	char *tx_buf_ptr;
	struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3;
	struct sk_buff *rx_skb_ptr;
	struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));

struct uli526x_board_info {
	struct uli_phy_ops {
		void (*write)(struct uli526x_board_info *, u8, u8, u16);
		u16 (*read)(struct uli526x_board_info *, u8, u8);
	} phy;
	struct net_device *next_dev;
	struct pci_dev *pdev;
	spinlock_t lock;

	void __iomem *ioaddr;
	u32 cr0_data;
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* DMA (bus) addresses of the buffer and descriptor pools */
	dma_addr_t buf_pool_dma_ptr;
	dma_addr_t buf_pool_dma_start;
	dma_addr_t desc_pool_dma_ptr;
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* CPU pointers into the pools and the descriptor rings */
	unsigned char *buf_pool_ptr;
	unsigned char *buf_pool_start;
	unsigned char *desc_pool_ptr;
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;
	unsigned long tx_packet_cnt;
	unsigned long rx_avail_cnt;
	unsigned long interval_rx_cnt;

	u16 dbug_cnt;
	u16 NIC_capability;
	u16 PHY_reg4;

	u8 media_mode;
	u8 op_mode;
	u8 phy_addr;
	u8 link_failed;
	u8 wait_reset;
	struct timer_list timer;

	/* Driver-defined statistic counters */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
	u8 init;
};

enum uli526x_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};

enum uli526x_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
static u32 uli526x_cr6_user_set;

static int debug;
static u32 cr6set;
static int mode = 8;

static int uli526x_open(struct net_device *);
static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
				      struct net_device *);
static int uli526x_stop(struct net_device *);
static void uli526x_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(struct uli526x_board_info *, int);
static irqreturn_t uli526x_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
static void uli526x_descriptor_init(struct net_device *, void __iomem *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct net_device *, int);
static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
static void phy_write_1bit(struct uli526x_board_info *db, u32);
static u16 phy_read_1bit(struct uli526x_board_info *db);
static u8 uli526x_sense_speed(struct uli526x_board_info *);
static void uli526x_process_mode(struct uli526x_board_info *);
static void uli526x_timer(struct timer_list *t);
static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
static void uli526x_dynamic_reset(struct net_device *);
static void uli526x_free_rxbuffer(struct uli526x_board_info *);
static void uli526x_init(struct net_device *);
static void uli526x_set_phyxcer(struct uli526x_board_info *);

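/* Clock one data bit into the serial EEPROM (SROM) through DCR9. */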
static void srom_clk_write(struct uli526x_board_info *db, u32 data)
{
	void __iomem *ioaddr = db->ioaddr;

	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
	udelay(5);
	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
	udelay(5);
	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
	udelay(5);
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= uli526x_open,
	.ndo_stop		= uli526x_stop,
	.ndo_start_xmit		= uli526x_start_xmit,
	.ndo_set_rx_mode	= uli526x_set_filter_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= uli526x_poll,
#endif
};

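/* Probe one ULi 526x adapter: allocate the netdev, map the I/O region, set
 * up the coherent descriptor/buffer pools, read the MAC address from the
 * SROM (or from the chip's ID registers when the SROM is blank) and
 * register the network device.
 */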
static int uli526x_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct uli526x_board_info *db;
	struct net_device *dev;
	void __iomem *ioaddr;
	int i, err;

	ULI526X_DBUG(0, "uli526x_init_one()", 0);

	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable the device */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE)) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		pr_err("Failed to request PCI regions\n");
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory and Tx buffer pool */
	err = -ENOMEM;

	db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
					       sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
					       &db->desc_pool_dma_ptr, GFP_KERNEL);
	if (!db->desc_pool_ptr)
		goto err_out_release;

	db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
					      TX_BUF_ALLOC * TX_DESC_CNT + 4,
					      &db->buf_pool_dma_ptr, GFP_KERNEL);
	if (!db->buf_pool_ptr)
		goto err_out_free_tx_desc;

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	/* M5263 uses the CR10 MII interface, M5261 bit-bangs through CR9 */
	switch (ent->driver_data) {
	case PCI_ULI5263_ID:
		db->phy.write	= phy_writeby_cr10;
		db->phy.read	= phy_readby_cr10;
		break;
	default:
		db->phy.write	= phy_writeby_cr9;
		db->phy.read	= phy_readby_cr9;
		break;
	}

	/* Map the I/O region */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_free_tx_buf;

	db->ioaddr = ioaddr;
	db->pdev = pdev;
	db->init = 1;

	pci_set_drvdata(pdev, dev);

	/* Register netdev and ethtool operations */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;

	spin_lock_init(&db->lock);

	/* Read 64 words of SROM data */
	for (i = 0; i < 64; i++)
		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));

	/* Set node address */
	if (((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) {
		/* SROM blank or missing: read the MAC address back through
		 * the CR13/CR14 diagnostic registers
		 */
		uw32(DCR0, 0x10000);
		uw32(DCR13, 0x1c0);
		uw32(DCR14, 0);
		uw32(DCR14, 0x10);
		uw32(DCR14, 0);
		uw32(DCR13, 0);
		uw32(DCR13, 0x1b0);

		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = ur32(DCR14);

		uw32(DCR13, 0);
		uw32(DCR0, 0);
		udelay(10);
	} else {
		/* MAC address stored in the SROM */
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = db->srom[20 + i];
	}
	err = register_netdev(dev);
	if (err)
		goto err_out_unmap;

	netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
		    ent->driver_data >> 16, pci_name(pdev),
		    dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_tx_buf:
	dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			  db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_tx_desc:
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			  db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_release:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);

	return err;
}

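/* Tear down one adapter: unregister the netdev and release the DMA pools,
 * I/O mapping and PCI resources acquired in uli526x_init_one().
 */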
static void uli526x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct uli526x_board_info *db = netdev_priv(dev);

	unregister_netdev(dev);
	pci_iounmap(pdev, db->ioaddr);
	dma_free_coherent(&db->pdev->dev,
			  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			  db->desc_pool_ptr, db->desc_pool_dma_ptr);
	dma_free_coherent(&db->pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			  db->buf_pool_ptr, db->buf_pool_dma_ptr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}

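/* Open the interface: reset the board state, initialize the hardware,
 * request the IRQ, start the transmit queue and arm the link-monitor timer.
 */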
static int uli526x_open(struct net_device *dev)
{
	int ret;
	struct uli526x_board_info *db = netdev_priv(dev);

	ULI526X_DBUG(0, "uli526x_open", 0);

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision */
	db->cr6_data |= ULI526X_TXTH_256;
	db->cr0_data = CR0_DEFAULT;

	/* Initialize ULI526X board */
	uli526x_init(dev);

	ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	/* Active system interface */
	netif_wake_queue(dev);

	/* Set and activate the link-monitor timer */
	timer_setup(&db->timer, uli526x_timer, 0);
	db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
	add_timer(&db->timer);

	return 0;
}

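/* Initialize the NIC: reset the chip, probe the PHY address, reset the PHY,
 * set up the descriptor rings, program the multicast filter and finally
 * enable the receiver and transmitter.
 */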
static void uli526x_init(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	struct uli_phy_ops *phy = &db->phy;
	void __iomem *ioaddr = db->ioaddr;
	u8 phy_tmp;
	u8 timeout;
	u16 phy_reg_reset;

	ULI526X_DBUG(0, "uli526x_init()", 0);

	/* Reset M526x MAC controller */
	uw32(DCR0, ULI526X_RESET);
	udelay(100);
	uw32(DCR0, db->cr0_data);
	udelay(5);

	/* PHY address: on some boards the M5261/M5263 PHY address != 1 */
	db->phy_addr = 1;
	for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
		u16 phy_value;

		phy_value = phy->read(db, phy_tmp, 3);	/* PHY ID register 2 */
		if (phy_value != 0xffff && phy_value != 0) {
			db->phy_addr = phy_tmp;
			break;
		}
	}

	if (phy_tmp == 32)
		pr_warn("Can not find the phy address!!!\n");

	db->media_mode = uli526x_media_mode;

	/* Reset the PHY (BMCR bit 15) */
	phy_reg_reset = phy->read(db, db->phy_addr, 0);
	phy_reg_reset = (phy_reg_reset | 0x8000);
	phy->write(db, db->phy_addr, 0, phy_reg_reset);

	/* Wait for the PHY reset bit to self-clear */
	udelay(500);
	timeout = 10;
	while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
		udelay(100);

	/* Process phyxcer media mode */
	uli526x_set_phyxcer(db);

	if (!(db->media_mode & ULI526X_AUTO))
		db->op_mode = db->media_mode;	/* Force mode */

	/* Initialize transmit/receive descriptors and CR3/CR4 */
	uli526x_descriptor_init(dev, ioaddr);

	/* Init CR6 to program M526X operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send the setup (filter) frame */
	send_filter_frame(dev, netdev_mc_count(dev));

	/* Init CR7, interrupt active bits */
	db->cr7_data = CR7_DEFAULT;
	uw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	uw32(DCR15, db->cr15_data);

	/* Enable ULI526X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC;
	update_cr6(db->cr6_data, ioaddr);
}

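/* Queue one packet for transmission: copy it into the pre-allocated Tx
 * buffer of the next free descriptor and hand the descriptor to the chip.
 */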
static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	ULI526X_DBUG(0, "uli526x_start_xmit", 0);

	/* Stop the queue until we know a descriptor is free */
	netif_stop_queue(dev);

	/* Drop oversized packets */
	if (skb->len > MAX_PACKET_SIZE) {
		netdev_err(dev, "big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* Out of Tx descriptors; should not happen in normal operation */
	if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupts while touching the ring */
	uw32(DCR7, 0);

	/* Copy the packet into the descriptor's Tx buffer */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to the next free transmit descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Hand the descriptor to the chip and trigger Tx polling */
	if (db->tx_packet_cnt < TX_DESC_CNT) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;
		uw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* Save time stamp */
	}

	/* Wake the queue if descriptors remain */
	if (db->tx_packet_cnt < TX_FREE_DESC_CNT)
		netif_wake_queue(dev);

	/* Restore CR7 to re-enable interrupts */
	spin_unlock_irqrestore(&db->lock, flags);
	uw32(DCR7, db->cr7_data);

	/* The data has been copied, the skb can be freed */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

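/* Stop the interface: halt the queue and timer, reset the chip and PHY,
 * free the IRQ and release all queued receive buffers.
 */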
static int uli526x_stop(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	netif_stop_queue(dev);

	/* Delete the link-monitor timer */
	del_timer_sync(&db->timer);

	/* Reset & stop the ULI526X board, then reset the PHY */
	uw32(DCR0, ULI526X_RESET);
	udelay(5);
	db->phy.write(db, db->phy_addr, 0, 0x8000);

	/* Free the interrupt */
	free_irq(db->pdev->irq, dev);

	/* Free allocated rx buffers */
	uli526x_free_rxbuffer(db);

	return 0;
}

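/* Interrupt handler: acknowledge CR5 status, bail out on fatal bus errors,
 * then service receive completions, refill the Rx ring and reclaim finished
 * Tx descriptors.
 */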
static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	uw32(DCR7, 0);

	/* Read and acknowledge the ULI526X status */
	db->cr5_data = ur32(DCR5);
	uw32(DCR5, db->cr5_data);
	if (!(db->cr5_data & 0x180c1)) {
		/* Nothing of interest pending */
		uw32(DCR7, db->cr7_data);
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* System bus error: schedule a dynamic reset */
		ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Receive the incoming packets */
	if ((db->cr5_data & 0x40) && db->rx_avail_cnt)
		uli526x_rx_packet(dev, db);

	/* Reallocate rx descriptor buffers */
	if (db->rx_avail_cnt < RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptors */
	if (db->cr5_data & 0x01)
		uli526x_free_tx_pkt(dev, db);

	/* Restore CR7 to re-enable the interrupt mask */
	uw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);

	/* The handler takes the irqsave lock, so calling it here is safe */
	uli526x_interrupt(db->pdev->irq, dev);
}
#endif

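/* Reclaim completed Tx descriptors, update error statistics and wake the
 * queue once enough descriptors are free again.
 */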
static void uli526x_free_tx_pkt(struct net_device *dev,
				struct uli526x_board_info *db)
{
	struct tx_desc *txptr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)		/* still owned by the chip */
			break;

		/* A packet has been sent */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counters */
		if (tdes0 != 0x7fffffff) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;
				if (tdes0 & 0x0002) {	/* FIFO underrun */
					db->tx_fifo_underrun++;
					if (!(db->cr6_data & CR6_SFT)) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}

	/* Update Tx remove pointer */
	db->tx_remove_ptr = txptr;

	/* Wake the upper layer when resources are available again */
	if (db->tx_packet_cnt < TX_WAKE_DESC_CNT)
		netif_wake_queue(dev);
}

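/* Receive path: walk the Rx ring, hand good frames to the network stack
 * (copying short frames into a fresh skb) and recycle the ring buffers of
 * bad or dropped frames.
 */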
static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)		/* still owned by the chip */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if ((rdes0 & 0x300) != 0x300) {
			/* Packet without both First/Last flags: reuse the skb */
			ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A complete packet: strip the 4-byte CRC */
			rxlen = ((rdes0 >> 16) & 0x3fff) - 4;

			/* Error summary bit check */
			if (rdes0 & 0x8000) {
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if (!(rdes0 & 0x8000) ||
			    ((db->cr6_data & CR6_PM) && (rxlen > 6))) {
				struct sk_buff *new_skb = NULL;

				skb = rxptr->rx_skb_ptr;

				/* Good packet: for short frames, copy into a
				 * fresh skb and recycle the ring buffer
				 */
				if ((rxlen < RX_COPY_SIZE) &&
				    ((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL)) {
					skb = new_skb;
					skb_reserve(skb, 2);	/* 16-byte align */
					skb_put_data(skb,
						     skb_tail_pointer(rxptr->rx_skb_ptr),
						     rxlen);
					uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
				} else
					skb_put(skb, rxlen);

				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += rxlen;

			} else {
				/* Error packet: reuse the skb buffer */
				ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}

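/* Set the Rx filter: promiscuous, pass-all-multicast, or a 14-entry perfect
 * multicast filter programmed via a setup frame.
 */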
static void uli526x_set_filter_mode(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	unsigned long flags;

	ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		ULI526X_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
		ULI526X_DBUG(0, "Pass all multicast address",
			     netdev_mc_count(dev));
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
	send_filter_frame(dev, netdev_mc_count(dev));
	spin_unlock_irqrestore(&db->lock, flags);
}

static void
ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db,
			       struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_10baseT_Half |
		     SUPPORTED_10baseT_Full |
		     SUPPORTED_100baseT_Half |
		     SUPPORTED_100baseT_Full |
		     SUPPORTED_Autoneg |
		     SUPPORTED_MII);

	advertising = (ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_Autoneg |
		       ADVERTISED_MII);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	cmd->base.port = PORT_MII;
	cmd->base.phy_address = db->phy_addr;

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_HALF;

	if (db->op_mode == ULI526X_100MHF || db->op_mode == ULI526X_100MFD)
		cmd->base.speed = SPEED_100;

	if (db->op_mode == ULI526X_10MFD || db->op_mode == ULI526X_100MFD)
		cmd->base.duplex = DUPLEX_FULL;

	if (db->link_failed) {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	if (db->media_mode & ULI526X_AUTO)
		cmd->base.autoneg = AUTONEG_ENABLE;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct uli526x_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct uli526x_board_info *np = netdev_priv(dev);

	ULi_ethtool_get_link_ksettings(np, cmd);

	return 0;
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct uli526x_board_info *np = netdev_priv(dev);

	if (np->link_failed)
		return 0;
	else
		return 1;
}

static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_PHY | WAKE_MAGIC;
	wol->wolopts = 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= netdev_get_link,
	.get_wol		= uli526x_get_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
};

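/* One-second housekeeping timer: detect a stalled receiver via CR8, kick or
 * reset a hung transmitter, and track PHY link changes to drive carrier
 * state and (re)negotiate the operating mode.
 */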
static void uli526x_timer(struct timer_list *t)
{
	struct uli526x_board_info *db = from_timer(db, t, timer);
	struct net_device *dev = pci_get_drvdata(db->pdev);
	struct uli_phy_ops *phy = &db->phy;
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;
	u8 tmp_cr12 = 0;
	u32 tmp_cr8;

	spin_lock_irqsave(&db->lock, flags);

	/* Dynamic reset check: missed frames while nothing was received */
	tmp_cr8 = ur32(DCR8);
	if ((db->interval_rx_cnt == 0) && (tmp_cr8)) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* Tx polling kick monitor */
	if (db->tx_packet_cnt &&
	    time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK)) {
		uw32(DCR1, 0x1);	/* Tx polling again */

		/* Tx timeout */
		if (time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT)) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			netdev_err(dev, " Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		uli526x_dynamic_reset(dev);
		db->timer.expires = ULI526X_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check: treat a non-zero link partner ability
	 * (MII register 5) as link up
	 */
	if ((phy->read(db, db->phy_addr, 5) & 0x01e0) != 0)
		tmp_cr12 = 3;

	if (!(tmp_cr12 & 0x3) && !db->link_failed) {
		/* Link went down */
		ULI526X_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);
		netdev_info(dev, "NIC Link is Down\n");
		db->link_failed = 1;

		/* Forced mode: turn PHY autonegotiation back on to recover */
		if (!(db->media_mode & 0x8))
			phy->write(db, db->phy_addr, 0, 0x1000);

		/* AUTO mode: drop the full-duplex bit until renegotiated */
		if (db->media_mode & ULI526X_AUTO) {
			db->cr6_data &= ~0x00000200;
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if ((tmp_cr12 & 0x3) && db->link_failed) {
		ULI526X_DBUG(0, "Link link OK", tmp_cr12);
		db->link_failed = 0;

		/* Auto sense speed/duplex */
		if ((db->media_mode & ULI526X_AUTO) &&
		    uli526x_sense_speed(db))
			db->link_failed = 1;
		uli526x_process_mode(db);

		if (db->link_failed == 0) {
			netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
				    (db->op_mode == ULI526X_100MHF ||
				     db->op_mode == ULI526X_100MFD)
				    ? 100 : 10,
				    (db->op_mode == ULI526X_10MFD ||
				     db->op_mode == ULI526X_100MFD)
				    ? "Full" : "Half");
			netif_carrier_on(dev);
		}
	} else if (!(tmp_cr12 & 0x3) && db->link_failed) {
		if (db->init == 1) {
			netdev_info(dev, "NIC Link is Down\n");
			netif_carrier_off(dev);
		}
	}
	db->init = 0;

	/* Re-arm the timer */
	db->timer.expires = ULI526X_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}

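/* Quiesce the chip before a reset or suspend: stop Rx/Tx, mask and clear
 * interrupts, stop the queue, free Rx buffers and reset software state.
 */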
static void uli526x_reset_prepare(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	/* Stop the MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	uw32(DCR7, 0);				/* Disable interrupts */
	uw32(DCR5, ur32(DCR5));			/* Clear pending status */

	/* Disable the upper layer interface */
	netif_stop_queue(dev);

	/* Free allocated Rx buffers */
	uli526x_free_rxbuffer(db);

	/* System variable init */
	db->tx_packet_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->init = 1;
	db->wait_reset = 0;
}

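/* Dynamic reset: re-initialize the whole device when the timer detects a
 * fatal error, a hung transmitter or a stalled receiver.
 */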
static void uli526x_dynamic_reset(struct net_device *dev)
{
	ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);

	uli526x_reset_prepare(dev);

	/* Re-initialize the ULI526X board */
	uli526x_init(dev);

	/* Restart the upper layer interface */
	netif_wake_queue(dev);
}

/*
 * Suspend the interface.
 */
static int __maybe_unused uli526x_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	ULI526X_DBUG(0, "uli526x_suspend", 0);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	uli526x_reset_prepare(dev);

	device_set_wakeup_enable(dev_d, 0);

	return 0;
}

/*
 * Resume the interface.
 */
static int __maybe_unused uli526x_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	ULI526X_DBUG(0, "uli526x_resume", 0);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	/* Re-initialize the ULI526X board */
	uli526x_init(dev);

	/* Restart the upper layer interface */
	netif_wake_queue(dev);

	return 0;
}

/*
 * Free all queued Rx buffers.
 */
static void uli526x_free_rxbuffer(struct uli526x_board_info *db)
{
	ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);

	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}

/*
 * Reuse an skb: re-attach it to a free Rx descriptor and hand the
 * descriptor back to the chip.
 */
static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff *skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev,
							  skb_tail_pointer(skb),
							  RX_ALLOC_SIZE,
							  DMA_FROM_DEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}

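/* Build the Tx/Rx descriptor rings inside the coherent DMA pool, attach the
 * pre-allocated Tx buffers, chain the descriptors into rings and program the
 * ring base addresses into CR3/CR4.
 */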
static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);

	/* Tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	uw32(DCR4, db->first_tx_desc_dma);	/* Tx descriptor base address */

	/* Rx descriptor start pointer */
	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	uw32(DCR3, db->first_rx_desc_dma);	/* Rx descriptor base address */

	/* Init transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Point the last descriptor back to the first to close the ring */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init receive descriptor chain */
	tmp_rx_dma = db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Point the last descriptor back to the first to close the ring */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* Prepare Rx resources */
	allocate_rx_buffer(dev);
}

/*
 * Write a new CR6 value and give the chip time to latch it.
 */
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	uw32(DCR6, cr6_data);
	udelay(5);
}

#ifdef __BIG_ENDIAN
#define FLT_SHIFT 16
#else
#define FLT_SHIFT 0
#endif

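/* Build and queue a setup frame carrying the station address, the broadcast
 * address and up to 14 perfect-filter multicast addresses. Each 16-bit half
 * of an address occupies one 32-bit word, shifted by FLT_SHIFT on big-endian
 * hosts.
 */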
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 *addrptr;
	u32 *suptr;
	int i;

	ULI526X_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0] << FLT_SHIFT;
	*suptr++ = addrptr[1] << FLT_SHIFT;
	*suptr++ = addrptr[2] << FLT_SHIFT;

	/* Broadcast address */
	*suptr++ = 0xffff << FLT_SHIFT;
	*suptr++ = 0xffff << FLT_SHIFT;
	*suptr++ = 0xffff << FLT_SHIFT;

	/* Fill in the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0] << FLT_SHIFT;
		*suptr++ = addrptr[1] << FLT_SHIFT;
		*suptr++ = addrptr[2] << FLT_SHIFT;
	}

	/* Pad the remaining filter slots with the broadcast address */
	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff << FLT_SHIFT;
		*suptr++ = 0xffff << FLT_SHIFT;
		*suptr++ = 0xffff << FLT_SHIFT;
	}

	/* Prepare the setup frame descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource check and send the setup packet */
	if (db->tx_packet_cnt < TX_DESC_CNT) {
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		uw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		netif_trans_update(dev);
	} else
		netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}

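/* Refill the Rx ring: allocate an skb for every free descriptor, map it for
 * DMA and hand the descriptor back to the chip.
 */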
static void allocate_rx_buffer(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while (db->rx_avail_cnt < RX_DESC_CNT) {
		skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
		if (skb == NULL)
			break;
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev,
							  skb_tail_pointer(skb),
							  RX_ALLOC_SIZE,
							  DMA_FROM_DEVICE));
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}

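/* Bit-bang one 16-bit word out of the serial EEPROM (SROM) through CR9:
 * issue the read command and address, then clock out the data bits.
 */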
static u16 read_srom_word(struct uli526x_board_info *db, int offset)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 srom_data = 0;
	int i;

	uw32(DCR9, CR9_SROM_READ);
	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* Send the read command (110b) */
	srom_clk_write(db, SROM_DATA_1);
	srom_clk_write(db, SROM_DATA_1);
	srom_clk_write(db, SROM_DATA_0);

	/* Send the 6-bit word offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(db, srom_data);
	}

	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* Clock out the 16 data bits */
	for (i = 16; i > 0; i--) {
		uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
			    ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	uw32(DCR9, CR9_SROM_READ);
	return srom_data;
}

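/* Read the negotiated speed/duplex from the PHY and record it in op_mode.
 * Returns non-zero when the link is not usable yet.
 */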
static u8 uli526x_sense_speed(struct uli526x_board_info *db)
{
	struct uli_phy_ops *phy = &db->phy;
	u8 ErrFlag = 0;
	u16 phy_mode;

	phy_mode = phy->read(db, db->phy_addr, 1);
	phy_mode = phy->read(db, db->phy_addr, 1);

	if ((phy_mode & 0x24) == 0x24) {
		/* Link up and autonegotiation complete */
		phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0) << 7);
		if (phy_mode & 0x8000)
			phy_mode = 0x8000;
		else if (phy_mode & 0x4000)
			phy_mode = 0x4000;
		else if (phy_mode & 0x2000)
			phy_mode = 0x2000;
		else
			phy_mode = 0x1000;

		switch (phy_mode) {
		case 0x1000: db->op_mode = ULI526X_10MHF;  break;
		case 0x2000: db->op_mode = ULI526X_10MFD;  break;
		case 0x4000: db->op_mode = ULI526X_100MHF; break;
		case 0x8000: db->op_mode = ULI526X_100MFD; break;
		default:     db->op_mode = ULI526X_10MHF;  ErrFlag = 1; break;
		}
	} else {
		db->op_mode = ULI526X_10MHF;
		ULI526X_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}

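/* Program the PHY advertisement register (MII register 4) according to the
 * requested media mode and restart autonegotiation.
 */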
static void uli526x_set_phyxcer(struct uli526x_board_info *db)
{
	struct uli_phy_ops *phy = &db->phy;
	u16 phy_reg;

	/* Phyxcer capability setting */
	phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;

	if (db->media_mode & ULI526X_AUTO) {
		/* AUTO mode */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force mode */
		switch (db->media_mode) {
		case ULI526X_10MHF:  phy_reg |= 0x20;  break;
		case ULI526X_10MFD:  phy_reg |= 0x40;  break;
		case ULI526X_100MHF: phy_reg |= 0x80;  break;
		case ULI526X_100MFD: phy_reg |= 0x100; break;
		}
	}

	/* Write the new capability to PHY register 4 */
	if (!(phy_reg & 0x01e0)) {
		phy_reg |= db->PHY_reg4;
		db->media_mode |= ULI526X_AUTO;
	}
	phy->write(db, db->phy_addr, 4, phy_reg);

	/* Restart autonegotiation */
	phy->write(db, db->phy_addr, 0, 0x1200);
	udelay(50);
}

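/* Apply the current operating mode: set or clear the CR6 full-duplex bit
 * and, in forced mode, write the forced speed/duplex into the PHY control
 * register when the link partner cannot autonegotiate.
 */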
static void uli526x_process_mode(struct uli526x_board_info *db)
{
	struct uli_phy_ops *phy = &db->phy;
	u16 phy_reg;

	/* Full duplex mode check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set full duplex bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear full duplex bit */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode handling */
	if (!(db->media_mode & 0x8)) {
		phy_reg = phy->read(db, db->phy_addr, 6);
		if (!(phy_reg & 0x1)) {
			/* Partner without N-Way capability */
			phy_reg = 0x0;
			switch (db->op_mode) {
			case ULI526X_10MHF:  phy_reg = 0x0;    break;
			case ULI526X_10MFD:  phy_reg = 0x100;  break;
			case ULI526X_100MHF: phy_reg = 0x2000; break;
			case ULI526X_100MFD: phy_reg = 0x2100; break;
			}
			phy->write(db, db->phy_addr, 0, phy_reg);
		}
	}
}

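/* MII management helpers. The M5261 accesses the PHY by bit-banging the MDIO
 * lines through CR9 (phy_*by_cr9), while the M5263 exposes a direct MII
 * management interface through CR10 (phy_*by_cr10).
 */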
static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
			    u8 offset, u16 phy_data)
{
	u16 i;

	/* Send the synchronization preamble (a run of 1 bits) to the PHY */
	for (i = 0; i < 35; i++)
		phy_write_1bit(db, PHY_DATA_1);

	/* Send the start bits (01) */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* Send the write opcode (01) */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* Send the PHY address */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Send the register address */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Turnaround (10) */
	phy_write_1bit(db, PHY_DATA_1);
	phy_write_1bit(db, PHY_DATA_0);

	/* Write the 16-bit data word */
	for (i = 0x8000; i > 0; i >>= 1)
		phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
}

static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
{
	u16 phy_data;
	int i;

	/* Send the synchronization preamble (a run of 1 bits) to the PHY */
	for (i = 0; i < 35; i++)
		phy_write_1bit(db, PHY_DATA_1);

	/* Send the start bits (01) */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* Send the read opcode (10) */
	phy_write_1bit(db, PHY_DATA_1);
	phy_write_1bit(db, PHY_DATA_0);

	/* Send the PHY address */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Send the register address */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Skip the turnaround bit */
	phy_read_1bit(db);

	/* Read the 16-bit data word */
	for (phy_data = 0, i = 0; i < 16; i++) {
		phy_data <<= 1;
		phy_data |= phy_read_1bit(db);
	}

	return phy_data;
}

static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
			   u8 offset)
{
	void __iomem *ioaddr = db->ioaddr;
	u32 cr10_value = phy_addr;

	cr10_value = (cr10_value << 5) + offset;
	cr10_value = (cr10_value << 16) + 0x08000000;
	uw32(DCR10, cr10_value);
	udelay(1);
	while (1) {
		cr10_value = ur32(DCR10);
		if (cr10_value & 0x10000000)
			break;
	}
	return cr10_value & 0x0ffff;
}

static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
			     u8 offset, u16 phy_data)
{
	void __iomem *ioaddr = db->ioaddr;
	u32 cr10_value = phy_addr;

	cr10_value = (cr10_value << 5) + offset;
	cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
	uw32(DCR10, cr10_value);
	udelay(1);
}

/*
 * Write one bit to the MII management interface via CR9.
 */
static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
{
	void __iomem *ioaddr = db->ioaddr;

	uw32(DCR9, data);		/* MII clock low */
	udelay(1);
	uw32(DCR9, data | MDCLKH);	/* MII clock high */
	udelay(1);
	uw32(DCR9, data);		/* MII clock low */
	udelay(1);
}

/*
 * Read one bit from the MII management interface via CR9.
 */
static u16 phy_read_1bit(struct uli526x_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_data;

	uw32(DCR9, 0x50000);
	udelay(1);
	phy_data = (ur32(DCR9) >> 19) & 0x1;
	uw32(DCR9, 0x40000);
	udelay(1);

	return phy_data;
}

static const struct pci_device_id uli526x_pci_tbl[] = {
	{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
	{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);

static SIMPLE_DEV_PM_OPS(uli526x_pm_ops, uli526x_suspend, uli526x_resume);

static struct pci_driver uli526x_driver = {
	.name		= "uli526x",
	.id_table	= uli526x_pci_tbl,
	.probe		= uli526x_init_one,
	.remove		= uli526x_remove_one,
	.driver.pm	= &uli526x_pm_ops,
};

MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0644);
module_param(mode, int, 0);
module_param(cr6set, int, 0);
MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

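/* Module entry point: pick up the debug/cr6set/mode parameters, validate the
 * requested media mode and register the PCI driver.
 */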
static int __init uli526x_init_module(void)
{
	ULI526X_DBUG(0, "init_module() ", debug);

	if (debug)
		uli526x_debug = debug;
	if (cr6set)
		uli526x_cr6_user_set = cr6set;

	switch (mode) {
	case ULI526X_10MHF:
	case ULI526X_100MHF:
	case ULI526X_10MFD:
	case ULI526X_100MFD:
		uli526x_media_mode = mode;
		break;
	default:
		uli526x_media_mode = ULI526X_AUTO;
		break;
	}

	return pci_register_driver(&uli526x_driver);
}

static void __exit uli526x_cleanup_module(void)
{
	ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
	pci_unregister_driver(&uli526x_driver);
}

module_init(uli526x_init_module);
module_exit(uli526x_cleanup_module);