1
2
3
4
5
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#define DRV_NAME "uli526x"
10
11#include <linux/module.h>
12
13#include <linux/kernel.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/ethtool.h>
24#include <linux/skbuff.h>
25#include <linux/delay.h>
26#include <linux/spinlock.h>
27#include <linux/dma-mapping.h>
28#include <linux/bitops.h>
29
30#include <asm/processor.h>
31#include <asm/io.h>
32#include <asm/dma.h>
33#include <linux/uaccess.h>
34
35#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
36#define ur32(reg) ioread32(ioaddr + (reg))
37
38
39#define PCI_ULI5261_ID 0x526110B9
40#define PCI_ULI5263_ID 0x526310B9
41
42#define ULI526X_IO_SIZE 0x100
43#define TX_DESC_CNT 0x20
44#define RX_DESC_CNT 0x30
45#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)
46#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)
47#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
48#define TX_BUF_ALLOC 0x600
49#define RX_ALLOC_SIZE 0x620
50#define ULI526X_RESET 1
51#define CR0_DEFAULT 0
52#define CR6_DEFAULT 0x22200000
53#define CR7_DEFAULT 0x180c1
54#define CR15_DEFAULT 0x06
55#define TDES0_ERR_MASK 0x4302
56#define MAX_PACKET_SIZE 1514
57#define ULI5261_MAX_MULTICAST 14
58#define RX_COPY_SIZE 100
59#define MAX_CHECK_PACKET 0x8000
60
61#define ULI526X_10MHF 0
62#define ULI526X_100MHF 1
63#define ULI526X_10MFD 4
64#define ULI526X_100MFD 5
65#define ULI526X_AUTO 8
66
67#define ULI526X_TXTH_72 0x400000
68#define ULI526X_TXTH_96 0x404000
69#define ULI526X_TXTH_128 0x0000
70#define ULI526X_TXTH_256 0x4000
71#define ULI526X_TXTH_512 0x8000
72#define ULI526X_TXTH_1K 0xC000
73
74#define ULI526X_TIMER_WUT (jiffies + HZ * 1)
75#define ULI526X_TX_TIMEOUT ((16*HZ)/2)
76#define ULI526X_TX_KICK (4*HZ/2)
77
78#define ULI526X_DBUG(dbug_now, msg, value) \
79do { \
80 if (uli526x_debug || (dbug_now)) \
81 pr_err("%s %lx\n", (msg), (long) (value)); \
82} while (0)
83
84#define SHOW_MEDIA_TYPE(mode) \
85 pr_err("Change Speed to %sMhz %s duplex\n", \
86 mode & 1 ? "100" : "10", \
87 mode & 4 ? "full" : "half");
88
89
90
91#define CR9_SROM_READ 0x4800
92#define CR9_SRCS 0x1
93#define CR9_SRCLK 0x2
94#define CR9_CRDOUT 0x8
95#define SROM_DATA_0 0x0
96#define SROM_DATA_1 0x4
97#define PHY_DATA_1 0x20000
98#define PHY_DATA_0 0x00000
99#define MDCLKH 0x10000
100
101#define PHY_POWER_DOWN 0x800
102
103#define SROM_V41_CODE 0x14
104
105
/* Transmit descriptor. The first four fields are the hardware-visible
 * descriptor words (little-endian, owned by the DMA engine); the trailing
 * pointers are driver-side bookkeeping only. 32-byte aligned so the
 * hardware fields and the software fields share one cache-friendly slot. */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3;	/* hardware descriptor words */
	char *tx_buf_ptr;			/* CPU address of the tx buffer */
	struct tx_desc *next_tx_desc;		/* software ring linkage */
} __attribute__(( aligned(32) ));
111
/* Receive descriptor. rdes0-rdes3 are the hardware-visible descriptor
 * words (little-endian); the skb pointer and ring link are driver-only. */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3;	/* hardware descriptor words */
	struct sk_buff *rx_skb_ptr;		/* skb backing this descriptor */
	struct rx_desc *next_rx_desc;		/* software ring linkage */
} __attribute__(( aligned(32) ));
117
/* Per-adapter private data, stored in netdev_priv(dev). */
struct uli526x_board_info {
	struct uli_phy_ops {
		/* MII access ops, selected at probe time: CR10-based access
		 * for the M5263, CR9 bit-banging otherwise. */
		void (*write)(struct uli526x_board_info *, u8, u8, u16);
		u16 (*read)(struct uli526x_board_info *, u8, u8);
	} phy;
	struct net_device *next_dev;	/* next device (unused here; legacy) */
	struct pci_dev *pdev;		/* owning PCI device */
	spinlock_t lock;		/* protects rings, counters, CR writes */

	void __iomem *ioaddr;		/* mapped register base */
	u32 cr0_data;			/* shadow copies of CR0/5/6/7/15 */
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* DMA (bus) addresses of the coherent pools */
	dma_addr_t buf_pool_dma_ptr;	/* tx buffer pool */
	dma_addr_t buf_pool_dma_start;	/* tx buffer pool, aligned start */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* CPU addresses and ring cursors */
	unsigned char *buf_pool_ptr;	/* tx buffer pool */
	unsigned char *buf_pool_start;	/* tx buffer pool, aligned start */
	unsigned char *desc_pool_ptr;	/* descriptor pool */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* next free tx descriptor */
	struct tx_desc *tx_remove_ptr;	/* oldest in-flight tx descriptor */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;	/* next slot to refill with an skb */
	struct rx_desc *rx_ready_ptr;	/* next slot to harvest a packet from */
	unsigned long tx_packet_cnt;	/* tx descriptors currently in flight */
	unsigned long rx_avail_cnt;	/* rx descriptors armed with an skb */
	unsigned long interval_rx_cnt;	/* rx packets seen per timer interval */

	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* saved PHY register 4 value */

	u8 media_mode;			/* user-requested media mode */
	u8 op_mode;			/* actual negotiated media mode */
	u8 phy_addr;			/* MII address of the attached PHY */
	u8 link_failed;			/* 1 while link is down */
	u8 wait_reset;			/* hardware fault seen, reset pending */
	struct timer_list timer;	/* 1 Hz link/watchdog timer */

	/* driver-internal error statistics */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* raw SROM (EEPROM) contents, read at probe */
	unsigned char srom[128];
	u8 init;			/* 1 until first timer pass after (re)init */
};
182
/* Command/status register offsets: CR0..CR15 on an 8-byte stride. */
enum uli526x_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
189
/* Individual control bits within CR6 (operation mode register). */
enum uli526x_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
195
196
/* Global state shared by all uli526x instances */
static int uli526x_debug;				/* enables ULI526X_DBUG output */
static unsigned char uli526x_media_mode = ULI526X_AUTO;	/* default media mode */
static u32 uli526x_cr6_user_set;			/* user CR6 override */

/* Module input parameters (copied into the globals above at init) */
static int debug;
static u32 cr6set;
static int mode = 8;

/* Forward declarations */
static int uli526x_open(struct net_device *);
static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
				      struct net_device *);
static int uli526x_stop(struct net_device *);
static void uli526x_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(struct uli526x_board_info *, int);
static irqreturn_t uli526x_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
static void uli526x_descriptor_init(struct net_device *, void __iomem *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct net_device *, int);
static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
static void phy_write_1bit(struct uli526x_board_info *db, u32);
static u16 phy_read_1bit(struct uli526x_board_info *db);
static u8 uli526x_sense_speed(struct uli526x_board_info *);
static void uli526x_process_mode(struct uli526x_board_info *);
static void uli526x_timer(struct timer_list *t);
static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
static void uli526x_dynamic_reset(struct net_device *);
static void uli526x_free_rxbuffer(struct uli526x_board_info *);
static void uli526x_init(struct net_device *);
static void uli526x_set_phyxcer(struct uli526x_board_info *);
238
239static void srom_clk_write(struct uli526x_board_info *db, u32 data)
240{
241 void __iomem *ioaddr = db->ioaddr;
242
243 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
244 udelay(5);
245 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
246 udelay(5);
247 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
248 udelay(5);
249}
250
251
252
/* net_device_ops: standard netdev entry points for this driver. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= uli526x_open,
	.ndo_stop		= uli526x_stop,
	.ndo_start_xmit		= uli526x_start_xmit,
	.ndo_set_rx_mode	= uli526x_set_filter_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= uli526x_poll,
#endif
};
264
265
266
267
268
/*
 * PCI probe: allocate the netdev and DMA pools, map the registers, read
 * the MAC address from the SROM (or the chip's ID table if the SROM is
 * blank) and register the network device.
 */
static int uli526x_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct uli526x_board_info *db;	/* board information structure */
	struct net_device *dev;
	void __iomem *ioaddr;
	int i, err;

	ULI526X_DBUG(0, "uli526x_init_one()", 0);

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* descriptor/buffer pools are allocated coherent below, so the
	 * device must be able to address 32-bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable the device before touching its BARs */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		pr_err("Failed to request PCI regions\n");
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory; the extra 0x20 / 4 bytes give
	 * room for alignment of the hardware rings */
	err = -ENOMEM;

	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr)
		goto err_out_release;

	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr)
		goto err_out_free_tx_desc;

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	/* Select the PHY access method: the M5263 uses the CR10 interface,
	 * everything else the CR9 bit-bang interface */
	switch (ent->driver_data) {
	case PCI_ULI5263_ID:
		db->phy.write	= phy_writeby_cr10;
		db->phy.read	= phy_readby_cr10;
		break;
	default:
		db->phy.write	= phy_writeby_cr9;
		db->phy.read	= phy_readby_cr9;
		break;
	}

	/* Map the register BAR */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_free_tx_buf;

	db->ioaddr = ioaddr;
	db->pdev = pdev;
	db->init = 1;

	pci_set_drvdata(pdev, dev);

	/* Register netdev and ethtool entry points */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;

	spin_lock_init(&db->lock);


	/* read the 64-word SROM into db->srom */
	for (i = 0; i < 64; i++)
		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));

	/* Set the node address: if the SROM looks blank (all-ones or
	 * all-zero first word), read the MAC from the chip's ID table via
	 * the CR13/CR14 back door instead of from the SROM */
	if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)		/* SROM absent, so read MAC address from ID Table */
	{
		uw32(DCR0, 0x10000);	//Diagnosis mode
		uw32(DCR13, 0x1c0);	//Reset dianostic pointer port
		uw32(DCR14, 0);		//Clear reset port
		uw32(DCR14, 0x10);	//Reset ID Table pointer
		uw32(DCR14, 0);		//Clear reset port
		uw32(DCR13, 0);		//Clear CR13
		uw32(DCR13, 0x1b0);	//Select ID Table access port
		//Read MAC address from CR14
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = ur32(DCR14);
		//Read end
		uw32(DCR13, 0);		//Clear CR13
		uw32(DCR0, 0);		//Clear CR0
		udelay(10);
	}
	else		/*Exist SROM*/
	{
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = db->srom[20 + i];
	}
	err = register_netdev (dev);
	if (err)
		goto err_out_unmap;

	netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
		    ent->driver_data >> 16, pci_name(pdev),
		    dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_tx_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_tx_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_release:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);

	return err;
}
418
419
/* PCI remove: tear down in the reverse order of uli526x_init_one(). */
static void uli526x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct uli526x_board_info *db = netdev_priv(dev);

	unregister_netdev(dev);
	pci_iounmap(pdev, db->ioaddr);
	pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
			    DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
			    db->desc_pool_dma_ptr);
	pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
436
437
438
439
440
441
442
443static int uli526x_open(struct net_device *dev)
444{
445 int ret;
446 struct uli526x_board_info *db = netdev_priv(dev);
447
448 ULI526X_DBUG(0, "uli526x_open", 0);
449
450
451 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
452 db->tx_packet_cnt = 0;
453 db->rx_avail_cnt = 0;
454 db->link_failed = 1;
455 netif_carrier_off(dev);
456 db->wait_reset = 0;
457
458 db->NIC_capability = 0xf;
459 db->PHY_reg4 = 0x1e0;
460
461
462 db->cr6_data |= ULI526X_TXTH_256;
463 db->cr0_data = CR0_DEFAULT;
464
465
466 uli526x_init(dev);
467
468 ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
469 dev->name, dev);
470 if (ret)
471 return ret;
472
473
474 netif_wake_queue(dev);
475
476
477 timer_setup(&db->timer, uli526x_timer, 0);
478 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
479 add_timer(&db->timer);
480
481 return 0;
482}
483
484
485
486
487
488
489
490
491
/*
 * Initialize the ULI526X board: software reset, PHY address probe, PHY
 * reset, media selection, descriptor ring setup, filter frame download
 * and finally enabling of interrupts and the rx/tx state machines.
 * Called from open and from dynamic reset, so it must fully reprogram
 * the chip from scratch each time.
 */
static void uli526x_init(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	struct uli_phy_ops *phy = &db->phy;
	void __iomem *ioaddr = db->ioaddr;
	u8	phy_tmp;
	u8	timeout;
	u16	phy_reg_reset;


	ULI526X_DBUG(0, "uli526x_init()", 0);

	/* Reset M526x MAC controller */
	uw32(DCR0, ULI526X_RESET);	/* RESET MAC */
	udelay(100);
	uw32(DCR0, db->cr0_data);
	udelay(5);

	/* Probe the PHY address by reading register 3 (PHY ID2) at each of
	 * the 32 possible MII addresses; fall back to address 1 */
	db->phy_addr = 1;
	for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
		u16 phy_value;

		phy_value = phy->read(db, phy_tmp, 3);	/* peer add */
		if (phy_value != 0xffff && phy_value != 0) {
			db->phy_addr = phy_tmp;
			break;
		}
	}

	if (phy_tmp == 32)
		pr_warn("Can not find the phy address!!!\n");
	/* Parser SROM and media mode */
	db->media_mode = uli526x_media_mode;

	/* Phyxcer capability setting: set the reset bit (0x8000) in PHY
	 * control register 0 */
	phy_reg_reset = phy->read(db, db->phy_addr, 0);
	phy_reg_reset = (phy_reg_reset | 0x8000);
	phy->write(db, db->phy_addr, 0, phy_reg_reset);

	/* Wait for the PHY to come out of reset: the reset bit is
	 * self-clearing, poll it up to 10 times */
	udelay(500);
	timeout = 10;
	while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
		udelay(100);

	/* Process Phyxcer Media Mode */
	uli526x_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & ULI526X_AUTO) )
		db->op_mode = db->media_mode;		/* Force Mode */

	/* Initialize Transmit/Receive descriptor and CR3/4 */
	uli526x_descriptor_init(dev, ioaddr);

	/* Init CR6 to program M526X operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to program the unicast/multicast filter */
	send_filter_frame(dev, netdev_mc_count(dev));	/* M5261/M5263 */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	uw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	uw32(DCR15, db->cr15_data);

	/* Enable ULI526X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC;
	update_cr6(db->cr6_data, ioaddr);
}
567
568
569
570
571
572
573
/*
 * Hard-start transmit: copy the skb into the next preallocated tx buffer,
 * hand the descriptor to the hardware and kick the tx poll demand.
 * Interrupts are masked (DCR7 = 0) around the descriptor update so the
 * ISR cannot race the ring manipulation; db->lock serializes against the
 * timer and the ISR on other CPUs.
 */
static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	ULI526X_DBUG(0, "uli526x_start_xmit", 0);

	/* Resource flag check */
	netif_stop_queue(dev);

	/* Too large packet check: buffers are TX_BUF_ALLOC (0x600) bytes */
	if (skb->len > MAX_PACKET_SIZE) {
		netdev_err(dev, "big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happen nromally */
	if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt */
	uw32(DCR7, 0);

	/* transmit this packet: copy into the descriptor's bounce buffer */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	/* 0xe1000000 = first/last segment + interrupt-on-completion flags */
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: give ownership to the hardware and
	 * issue a tx poll demand via CR1 */
	if (db->tx_packet_cnt < TX_DESC_CNT) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		uw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* saved time stamp */
	}

	/* Tx resource check */
	if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	uw32(DCR7, db->cr7_data);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
635
636
637
638
639
640
641
/*
 * Stop the interface: halt the queue and timer, reset the MAC, power
 * down the PHY (reset bit via MII register 0), release the IRQ and free
 * all rx skbs still held by the ring.
 */
static int uli526x_stop(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	/* disable system */
	netif_stop_queue(dev);

	/* deleted timer */
	del_timer_sync(&db->timer);

	/* Reset & stop ULI526X board */
	uw32(DCR0, ULI526X_RESET);
	udelay(5);
	db->phy.write(db, db->phy_addr, 0, 0x8000);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffer */
	uli526x_free_rxbuffer(db);

	return 0;
}
666
667
668
669
670
671
672
/*
 * Interrupt handler: acknowledge CR5 status, bail out on a fatal system
 * bus error (schedules a dynamic reset via wait_reset), otherwise harvest
 * received packets, refill the rx ring and reclaim completed tx
 * descriptors. CR7 is cleared on entry to mask further interrupts and
 * restored on the normal exit path.
 */
static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	uw32(DCR7, 0);

	/* Got ULI526X status: write-back acknowledges the bits */
	db->cr5_data = ur32(DCR5);
	uw32(DCR5, db->cr5_data);
	if ( !(db->cr5_data & 0x180c1) ) {
		/* not our interrupt (shared line) */
		uw32(DCR7, db->cr7_data);
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen — leave interrupts masked and let
		 * the timer perform a dynamic reset */
		ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		uli526x_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		uli526x_free_tx_pkt(dev, db);

	/* Restore CR7 to enable interrupt mask */
	uw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}
721
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service the device as if its interrupt had fired. */
static void uli526x_poll(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);

	/* ISR grabs the irqsave lock, so this is safe even if the interrupt
	 * fires concurrently on another CPU */
	uli526x_interrupt(db->pdev->irq, dev);
}
#endif
731
732
733
734
735
/*
 * Reclaim completed tx descriptors: walk from tx_remove_ptr until a
 * descriptor still owned by the hardware (tdes0 bit 31) is found,
 * accumulating statistics and error counters along the way. Re-enables
 * store-and-forward mode (CR6_SFT) after a fifo underrun. Called from
 * the ISR with db->lock held.
 */
static void uli526x_free_tx_pkt(struct net_device *dev,
				struct uli526x_board_info * db)
{
	struct tx_desc *txptr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)
			break;		/* still owned by the hardware */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counter: 0x7fffffff means the chip
		 * reported nothing useful for this descriptor */
		if ( tdes0 != 0x7fffffff ) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;
				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Resource available check */
	if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
788
789
790
791
792
793
/*
 * Receive path: walk armed rx descriptors from rx_ready_ptr until one is
 * still owned by the hardware, pushing good frames to the stack.
 * Fragmented descriptors (without both First and Last flags), and errored
 * frames (unless promiscuous mode wants them) get their skb recycled via
 * uli526x_reuse_skb(). Small frames are copied into a fresh skb so the
 * large RX_ALLOC_SIZE buffer can be reused immediately.
 * Called from the ISR with db->lock held.
 */
static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
		{
			break;
		}

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First and Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;	/* strip CRC */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				struct sk_buff *new_skb = NULL;

				skb = rxptr->rx_skb_ptr;

				/* Good packet, send to upper layer */
				/* Shorst packet used new SKB */
				if ((rxlen < RX_COPY_SIZE) &&
				    (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) {
					skb = new_skb;
					/* size less than COPY_SIZE, allocate a rxlen SKB */
					skb_reserve(skb, 2); /* 16byte align */
					skb_put_data(skb,
						     skb_tail_pointer(rxptr->rx_skb_ptr),
						     rxlen);
					uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
				} else
					skb_put(skb, rxlen);

				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += rxlen;

			} else {
				/* Reuse SKB buffer when the packet is error */
				ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
872
873
874
875
876
877
/*
 * Set rx mode: promiscuous (CR6 pass-mask + pass-bad-frames), pass-all-
 * multicast (when IFF_ALLMULTI or the multicast list exceeds the 14-slot
 * hardware filter), or program the perfect filter with a setup frame.
 */
static void uli526x_set_filter_mode(struct net_device * dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	unsigned long flags;

	ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		ULI526X_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
		ULI526X_DBUG(0, "Pass all multicast address",
			     netdev_mc_count(dev));
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
	send_filter_frame(dev, netdev_mc_count(dev));	/* M5261/M5263 */
	spin_unlock_irqrestore(&db->lock, flags);
}
908
909static void
910ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db,
911 struct ethtool_link_ksettings *cmd)
912{
913 u32 supported, advertising;
914
915 supported = (SUPPORTED_10baseT_Half |
916 SUPPORTED_10baseT_Full |
917 SUPPORTED_100baseT_Half |
918 SUPPORTED_100baseT_Full |
919 SUPPORTED_Autoneg |
920 SUPPORTED_MII);
921
922 advertising = (ADVERTISED_10baseT_Half |
923 ADVERTISED_10baseT_Full |
924 ADVERTISED_100baseT_Half |
925 ADVERTISED_100baseT_Full |
926 ADVERTISED_Autoneg |
927 ADVERTISED_MII);
928
929 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
930 supported);
931 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
932 advertising);
933
934 cmd->base.port = PORT_MII;
935 cmd->base.phy_address = db->phy_addr;
936
937 cmd->base.speed = SPEED_10;
938 cmd->base.duplex = DUPLEX_HALF;
939
940 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
941 {
942 cmd->base.speed = SPEED_100;
943 }
944 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
945 {
946 cmd->base.duplex = DUPLEX_FULL;
947 }
948 if(db->link_failed)
949 {
950 cmd->base.speed = SPEED_UNKNOWN;
951 cmd->base.duplex = DUPLEX_UNKNOWN;
952 }
953
954 if (db->media_mode & ULI526X_AUTO)
955 {
956 cmd->base.autoneg = AUTONEG_ENABLE;
957 }
958}
959
/* ethtool .get_drvinfo: report driver name and PCI bus location. */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct uli526x_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
968
/* ethtool .get_link_ksettings: delegate to the board-level helper. */
static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct uli526x_board_info *np = netdev_priv(dev);

	ULi_ethtool_get_link_ksettings(np, cmd);

	return 0;
}
978
979static u32 netdev_get_link(struct net_device *dev) {
980 struct uli526x_board_info *np = netdev_priv(dev);
981
982 if(np->link_failed)
983 return 0;
984 else
985 return 1;
986}
987
/* ethtool .get_wol: the chip can wake on PHY event or magic packet,
 * but no wake option is currently enabled by this driver. */
static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_PHY | WAKE_MAGIC;
	wol->wolopts = 0;
}
993
/* ethtool operations table */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= netdev_get_link,
	.get_wol		= uli526x_get_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
};
1000
1001
1002
1003
1004
1005
/*
 * 1 Hz watchdog timer: detects a wedged chip (CR8 activity with no rx),
 * kicks/timeouts stalled transmits, performs a dynamic reset when one was
 * requested, and tracks link state via the PHY's link-partner-ability
 * register (MII register 5), updating the carrier accordingly.
 */
static void uli526x_timer(struct timer_list *t)
{
	struct uli526x_board_info *db = from_timer(db, t, timer);
	struct net_device *dev = pci_get_drvdata(db->pdev);
	struct uli_phy_ops *phy = &db->phy;
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;
	u8 tmp_cr12 = 0;
	u32 tmp_cr8;

	//ULI526X_DBUG(0, "uli526x_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);


	/* Dynamic reset ULI526X : system error or transmit time-out.
	 * CR8 activity with zero received packets over the last interval
	 * suggests a wedged rx engine. */
	tmp_cr8 = ur32(DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor: re-issue a poll demand if a packet has
	 * been pending, escalate to a reset on full timeout */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
		uw32(DCR1, 0x1);   // Tx polling again

		// TX Timeout
		if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			netdev_err(dev, " Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		uli526x_dynamic_reset(dev);
		db->timer.expires = ULI526X_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check: a non-zero ability field in MII register 5
	 * is treated as link up */
	if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
		tmp_cr12 = 3;

	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
		/* Link Failed */
		ULI526X_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);
		netdev_info(dev, "NIC Link is Down\n");
		db->link_failed = 1;

		/* For Force 10/100M Half/Full mode: restart autoneg so a
		 * future link comes back up */
		if ( !(db->media_mode & 0x8) )
			phy->write(db, db->phy_addr, 0, 0x1000);

		/* AUTO mode: clear full-duplex bit until renegotiated */
		if (db->media_mode & ULI526X_AUTO) {
			db->cr6_data&=~0x00000200;	/* bit9 : full duplex mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else
		if ((tmp_cr12 & 0x3) && db->link_failed) {
			ULI526X_DBUG(0, "Link link OK", tmp_cr12);
			db->link_failed = 0;

			/* Auto Sense Speed */
			if ( (db->media_mode & ULI526X_AUTO) &&
				uli526x_sense_speed(db) )
				db->link_failed = 1;
			uli526x_process_mode(db);

			if(db->link_failed==0)
			{
				netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
					    (db->op_mode == ULI526X_100MHF ||
					     db->op_mode == ULI526X_100MFD)
					    ? 100 : 10,
					    (db->op_mode == ULI526X_10MFD ||
					     db->op_mode == ULI526X_100MFD)
					    ? "Full" : "Half");
				netif_carrier_on(dev);
			}
			/* SHOW_MEDIA_TYPE(db->op_mode); */
		}
		else if(!(tmp_cr12 & 0x3) && db->link_failed)
		{
			/* still down after (re)init: report once */
			if(db->init==1)
			{
				netdev_info(dev, "NIC Link is Down\n");
				netif_carrier_off(dev);
			}
		}
	db->init = 0;

	/* Timer active again */
	db->timer.expires = ULI526X_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
1111
1112
1113
1114
1115
1116
1117
1118
/*
 * Quiesce the hardware ahead of a reset or suspend: stop rx/tx state
 * machines, mask and acknowledge interrupts, stop the queue, release the
 * rx skbs and reset the software state flags.
 */
static void uli526x_reset_prepare(struct net_device *dev)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	/* Sopt MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	uw32(DCR7, 0);				/* Disable Interrupt */
	uw32(DCR5, ur32(DCR5));			/* ack pending status bits */

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocate buffer */
	uli526x_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->init=1;
	db->wait_reset = 0;
}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
/*
 * Dynamic reset: quiesce the chip, re-run the full init sequence and
 * restart the queue. Invoked from the timer when wait_reset is set
 * (fatal bus error, rx wedge or tx timeout).
 */
static void uli526x_dynamic_reset(struct net_device *dev)
{
	ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);

	uli526x_reset_prepare(dev);

	/* Re-initialize ULI526X board */
	uli526x_init(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1165
1166
1167
1168
1169
/* PM suspend: detach the netdev and quiesce the hardware; wakeup is not
 * enabled (see uli526x_get_wol — no wolopts are set). */
static int __maybe_unused uli526x_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	ULI526X_DBUG(0, "uli526x_suspend", 0);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	uli526x_reset_prepare(dev);

	device_set_wakeup_enable(dev_d, 0);

	return 0;
}
1186
1187
1188
1189
1190
/* PM resume: reattach the netdev, reprogram the chip from scratch and
 * restart the transmit queue. */
static int __maybe_unused uli526x_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	ULI526X_DBUG(0, "uli526x_resume", 0);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	/* Re-initialize ULI526X board */
	uli526x_init(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);

	return 0;
}
1209
1210
1211
1212
1213
1214static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
1215{
1216 ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
1217
1218
1219 while (db->rx_avail_cnt) {
1220 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1221 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1222 db->rx_avail_cnt--;
1223 }
1224}
1225
1226
1227
1228
1229
1230
/*
 * Re-arm the rx descriptor at rx_insert_ptr with an already-allocated skb
 * (one whose data was not consumed). The wmb() ensures the DMA address in
 * rdes2 is visible before ownership is handed back to the hardware via
 * the tdes0 owner bit.
 */
static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
							  skb_tail_pointer(skb),
							  RX_ALLOC_SIZE,
							  PCI_DMA_FROMDEVICE));
		wmb();	/* descriptor fields before ownership transfer */
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1248
1249
1250
1251
1252
1253
1254
/*
 * Build the tx and rx descriptor rings inside the coherent descriptor
 * pool (tx ring first, rx ring immediately after), chain each descriptor
 * to the next (wrapping the last back to the first), bind each tx
 * descriptor to its slice of the tx buffer pool, program the ring base
 * addresses into CR3/CR4, and arm the rx ring with skbs.
 */
static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	uw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */

	/* rx descriptor start pointer: rx ring follows the tx ring in the
	 * same coherent allocation */
	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	uw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets a TX_BUF_ALLOC slice of
	 * the buffer pool; tdes3 chains to the next descriptor's DMA addr */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* close the ring: last descriptor points back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	 /* Init Receive descriptor chain (skbs attached later) */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);	/* chain, buf size */
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* close the ring: last descriptor points back to the first */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* Allocate first receive buffer */
	allocate_rx_buffer(dev);
}
1312
1313
1314
1315
1316
1317
/* Write CR6 (operation mode) and give the chip 5us to latch it. */
static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	uw32(DCR6, cr6_data);
	udelay(5);
}
1323
1324
1325
1326
1327
1328
1329
/* Filter-frame entries are 16-bit values stored in the low (LE) or high
 * (BE) half of each 32-bit word of the setup buffer. */
#ifdef __BIG_ENDIAN
#define FLT_SHIFT 16
#else
#define FLT_SHIFT 0
#endif

/*
 * Build and transmit a setup frame to program the perfect filter:
 * slot 0 = our unicast address, slot 1 = broadcast, then up to 12
 * multicast addresses; unused slots are filled with the broadcast
 * address. Caller holds db->lock (or runs before interrupts are live).
 */
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
	struct uli526x_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	ULI526X_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0] << FLT_SHIFT;
	*suptr++ = addrptr[1] << FLT_SHIFT;
	*suptr++ = addrptr[2] << FLT_SHIFT;

	/* broadcast address */
	*suptr++ = 0xffff << FLT_SHIFT;
	*suptr++ = 0xffff << FLT_SHIFT;
	*suptr++ = 0xffff << FLT_SHIFT;

	/* fit the multicast address */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0] << FLT_SHIFT;
		*suptr++ = addrptr[1] << FLT_SHIFT;
		*suptr++ = addrptr[2] << FLT_SHIFT;
	}

	/* pad the remaining slots with broadcast */
	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff << FLT_SHIFT;
		*suptr++ = 0xffff << FLT_SHIFT;
		*suptr++ = 0xffff << FLT_SHIFT;
	}

	/* prepare the setup frame: 0x890000c0 marks it as a setup frame of
	 * 192 bytes */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (db->tx_packet_cnt < TX_DESC_CNT) {
		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		uw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		netif_trans_update(dev);
	} else
		netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}
1392
1393
1394
1395
1396
1397
1398
1399static void allocate_rx_buffer(struct net_device *dev)
1400{
1401 struct uli526x_board_info *db = netdev_priv(dev);
1402 struct rx_desc *rxptr;
1403 struct sk_buff *skb;
1404
1405 rxptr = db->rx_insert_ptr;
1406
1407 while(db->rx_avail_cnt < RX_DESC_CNT) {
1408 skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
1409 if (skb == NULL)
1410 break;
1411 rxptr->rx_skb_ptr = skb;
1412 rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
1413 skb_tail_pointer(skb),
1414 RX_ALLOC_SIZE,
1415 PCI_DMA_FROMDEVICE));
1416 wmb();
1417 rxptr->rdes0 = cpu_to_le32(0x80000000);
1418 rxptr = rxptr->next_rx_desc;
1419 db->rx_avail_cnt++;
1420 }
1421
1422 db->rx_insert_ptr = rxptr;
1423}
1424
1425
1426
1427
1428
1429
/*
 *	Read one 16-bit word from the serial EEPROM through the CR9
 *	bit-bang interface.  Assumes a 93C46-style part (6-bit address,
 *	read opcode 110) — TODO confirm against the board's EEPROM.
 */
static u16 read_srom_word(struct uli526x_board_info *db, int offset)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 srom_data = 0;
	int i;

	uw32(DCR9, CR9_SROM_READ);
	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);	/* assert chip select */

	/* Clock out the read command bits: 1, 1, 0 */
	srom_clk_write(db, SROM_DATA_1);
	srom_clk_write(db, SROM_DATA_1);
	srom_clk_write(db, SROM_DATA_0);

	/* Clock out the 6-bit word address, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(db, srom_data);
	}

	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);

	/* Shift in 16 data bits, MSB first, sampling DO on each clock */
	for (i = 16; i > 0; i--) {
		uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
				((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	uw32(DCR9, CR9_SROM_READ);	/* deassert chip select */
	return srom_data;
}
1464
1465
1466
1467
1468
1469
1470static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1471{
1472 struct uli_phy_ops *phy = &db->phy;
1473 u8 ErrFlag = 0;
1474 u16 phy_mode;
1475
1476 phy_mode = phy->read(db, db->phy_addr, 1);
1477 phy_mode = phy->read(db, db->phy_addr, 1);
1478
1479 if ( (phy_mode & 0x24) == 0x24 ) {
1480
1481 phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
1482 if(phy_mode&0x8000)
1483 phy_mode = 0x8000;
1484 else if(phy_mode&0x4000)
1485 phy_mode = 0x4000;
1486 else if(phy_mode&0x2000)
1487 phy_mode = 0x2000;
1488 else
1489 phy_mode = 0x1000;
1490
1491 switch (phy_mode) {
1492 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1493 case 0x2000: db->op_mode = ULI526X_10MFD; break;
1494 case 0x4000: db->op_mode = ULI526X_100MHF; break;
1495 case 0x8000: db->op_mode = ULI526X_100MFD; break;
1496 default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
1497 }
1498 } else {
1499 db->op_mode = ULI526X_10MHF;
1500 ULI526X_DBUG(0, "Link Failed :", phy_mode);
1501 ErrFlag = 1;
1502 }
1503
1504 return ErrFlag;
1505}
1506
1507
1508
1509
1510
1511
1512
1513
1514static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1515{
1516 struct uli_phy_ops *phy = &db->phy;
1517 u16 phy_reg;
1518
1519
1520 phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
1521
1522 if (db->media_mode & ULI526X_AUTO) {
1523
1524 phy_reg |= db->PHY_reg4;
1525 } else {
1526
1527 switch(db->media_mode) {
1528 case ULI526X_10MHF: phy_reg |= 0x20; break;
1529 case ULI526X_10MFD: phy_reg |= 0x40; break;
1530 case ULI526X_100MHF: phy_reg |= 0x80; break;
1531 case ULI526X_100MFD: phy_reg |= 0x100; break;
1532 }
1533
1534 }
1535
1536
1537 if ( !(phy_reg & 0x01e0)) {
1538 phy_reg|=db->PHY_reg4;
1539 db->media_mode|=ULI526X_AUTO;
1540 }
1541 phy->write(db, db->phy_addr, 4, phy_reg);
1542
1543
1544 phy->write(db, db->phy_addr, 0, 0x1200);
1545 udelay(50);
1546}
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556static void uli526x_process_mode(struct uli526x_board_info *db)
1557{
1558 struct uli_phy_ops *phy = &db->phy;
1559 u16 phy_reg;
1560
1561
1562 if (db->op_mode & 0x4)
1563 db->cr6_data |= CR6_FDM;
1564 else
1565 db->cr6_data &= ~CR6_FDM;
1566
1567 update_cr6(db->cr6_data, db->ioaddr);
1568
1569
1570 if (!(db->media_mode & 0x8)) {
1571
1572 phy_reg = phy->read(db, db->phy_addr, 6);
1573 if (!(phy_reg & 0x1)) {
1574
1575 phy_reg = 0x0;
1576 switch(db->op_mode) {
1577 case ULI526X_10MHF: phy_reg = 0x0; break;
1578 case ULI526X_10MFD: phy_reg = 0x100; break;
1579 case ULI526X_100MHF: phy_reg = 0x2000; break;
1580 case ULI526X_100MFD: phy_reg = 0x2100; break;
1581 }
1582 phy->write(db, db->phy_addr, 0, phy_reg);
1583 }
1584 }
1585}
1586
1587
1588
/*
 *	Write one PHY register by bit-banging an MII management frame
 *	through CR9.  Frame layout (MSB first): preamble, start (01),
 *	write opcode (01), 5-bit PHY address, 5-bit register offset,
 *	turnaround (10), 16 data bits.
 */
static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
			    u8 offset, u16 phy_data)
{
	u16 i;

	/* Preamble: at least 32 consecutive '1' bits to sync the PHY */
	for (i = 0; i < 35; i++)
		phy_write_1bit(db, PHY_DATA_1);

	/* Start-of-frame: 01 */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* Write opcode: 01 */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* 5-bit PHY address, MSB first */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

	/* 5-bit register offset, MSB first */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Turnaround: 10 */
	phy_write_1bit(db, PHY_DATA_1);
	phy_write_1bit(db, PHY_DATA_0);

	/* 16 data bits, MSB first */
	for (i = 0x8000; i > 0; i >>= 1)
		phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
}
1622
/*
 *	Read one PHY register by bit-banging an MII management frame
 *	through CR9.  Frame layout (MSB first): preamble, start (01),
 *	read opcode (10), 5-bit PHY address, 5-bit register offset,
 *	one turnaround clock, then 16 data bits clocked in.
 */
static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
{
	u16 phy_data;
	int i;

	/* Preamble: at least 32 consecutive '1' bits to sync the PHY */
	for (i = 0; i < 35; i++)
		phy_write_1bit(db, PHY_DATA_1);

	/* Start-of-frame: 01 */
	phy_write_1bit(db, PHY_DATA_0);
	phy_write_1bit(db, PHY_DATA_1);

	/* Read opcode: 10 */
	phy_write_1bit(db, PHY_DATA_1);
	phy_write_1bit(db, PHY_DATA_0);

	/* 5-bit PHY address, MSB first */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

	/* 5-bit register offset, MSB first */
	for (i = 0x10; i > 0; i = i >> 1)
		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);

	/* Turnaround clock: the PHY takes over the data line */
	phy_read_1bit(db);

	/* Shift in 16 data bits, MSB first */
	for (phy_data = 0, i = 0; i < 16; i++) {
		phy_data <<= 1;
		phy_data |= phy_read_1bit(db);
	}

	return phy_data;
}
1659
1660static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1661 u8 offset)
1662{
1663 void __iomem *ioaddr = db->ioaddr;
1664 u32 cr10_value = phy_addr;
1665
1666 cr10_value = (cr10_value << 5) + offset;
1667 cr10_value = (cr10_value << 16) + 0x08000000;
1668 uw32(DCR10, cr10_value);
1669 udelay(1);
1670 while (1) {
1671 cr10_value = ur32(DCR10);
1672 if (cr10_value & 0x10000000)
1673 break;
1674 }
1675 return cr10_value & 0x0ffff;
1676}
1677
1678static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1679 u8 offset, u16 phy_data)
1680{
1681 void __iomem *ioaddr = db->ioaddr;
1682 u32 cr10_value = phy_addr;
1683
1684 cr10_value = (cr10_value << 5) + offset;
1685 cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1686 uw32(DCR10, cr10_value);
1687 udelay(1);
1688}
1689
1690
1691
1692
/*
 *	Clock one bit out to the PHY over CR9: present the data value,
 *	raise the management clock (MDCLKH), then lower it again, with
 *	a short settle delay at each step.
 */
static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
{
	void __iomem *ioaddr = db->ioaddr;

	uw32(DCR9, data);		/* data with clock low */
	udelay(1);
	uw32(DCR9, data | MDCLKH);	/* rising edge latches the bit */
	udelay(1);
	uw32(DCR9, data);		/* clock low again */
	udelay(1);
}
1704
1705
1706
1707
1708
1709
/*
 *	Clock one bit in from the PHY over CR9: raise the management
 *	clock, sample the data-out line (bit 19 of CR9), then lower the
 *	clock.  Returns 0 or 1.
 */
static u16 phy_read_1bit(struct uli526x_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_data;

	uw32(DCR9, 0x50000);			/* clock high */
	udelay(1);
	phy_data = (ur32(DCR9) >> 19) & 0x1;	/* sample MDIO input */
	uw32(DCR9, 0x40000);			/* clock low */
	udelay(1);

	return phy_data;
}
1723
1724
1725static const struct pci_device_id uli526x_pci_tbl[] = {
1726 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1727 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1728 { 0, }
1729};
1730MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
1731
/* Legacy PM hooks; uli526x_suspend/uli526x_resume are defined earlier
 * in this file. */
static SIMPLE_DEV_PM_OPS(uli526x_pm_ops, uli526x_suspend, uli526x_resume);

/* PCI driver glue: probe/remove are defined earlier in this file. */
static struct pci_driver uli526x_driver = {
	.name		= "uli526x",
	.id_table	= uli526x_pci_tbl,
	.probe		= uli526x_init_one,
	.remove		= uli526x_remove_one,
	.driver.pm	= &uli526x_pm_ops,
};
1741
1742MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1743MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1744MODULE_LICENSE("GPL");
1745
1746module_param(debug, int, 0644);
1747module_param(mode, int, 0);
1748module_param(cr6set, int, 0);
1749MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1750MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
1751
1752
1753
1754
1755
1756
1757static int __init uli526x_init_module(void)
1758{
1759
1760 ULI526X_DBUG(0, "init_module() ", debug);
1761
1762 if (debug)
1763 uli526x_debug = debug;
1764 if (cr6set)
1765 uli526x_cr6_user_set = cr6set;
1766
1767 switch (mode) {
1768 case ULI526X_10MHF:
1769 case ULI526X_100MHF:
1770 case ULI526X_10MFD:
1771 case ULI526X_100MFD:
1772 uli526x_media_mode = mode;
1773 break;
1774 default:
1775 uli526x_media_mode = ULI526X_AUTO;
1776 break;
1777 }
1778
1779 return pci_register_driver(&uli526x_driver);
1780}
1781
1782
1783
1784
1785
1786
1787
1788
/*
 *	Module exit point: unregister the PCI driver (which tears down
 *	all bound devices via uli526x_remove_one).
 */
static void __exit uli526x_cleanup_module(void)
{
	ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
	pci_unregister_driver(&uli526x_driver);
}

module_init(uli526x_init_module);
module_exit(uli526x_cleanup_module);
1797