/*
 * amd8111e.c: AMD8111 based 10/100 Ethernet controller driver.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
#endif

#include "amd8111e.h"
#define MODULE_NAME	"amd8111e"
#define MODULE_VERS	"3.0.7"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller. Driver Version " MODULE_VERS);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");

static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

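/* Read a PHY register through the MAC's PHY_ACCESS port. */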
static int amd8111e_read_phy(struct amd8111e_priv *lp, int phy_id, int reg, u32 *val)
{
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;
	unsigned int repeat = REPEAT_CNT;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
	if (reg_val & PHY_RD_ERR)
		goto err_phy_read;

	*val = reg_val & 0xffff;
	return 0;
err_phy_read:
	*val = 0;
	return -EINVAL;
}

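/* Write a PHY register through the MAC's PHY_ACCESS port. */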
static int amd8111e_write_phy(struct amd8111e_priv *lp, int phy_id, int reg, u32 val)
{
	unsigned int repeat = REPEAT_CNT;
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16) | val, mmio + PHY_ACCESS);

	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));

	if (reg_val & PHY_RD_ERR)
		goto err_phy_write;

	return 0;

err_phy_write:
	return -EINVAL;
}

static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}

static void amd8111e_mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}

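/*
 * Set up the speed/duplex advertisement on the external PHY and
 * restart autonegotiation.
 */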
static void amd8111e_set_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 bmcr, advert, tmp;

	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	switch (lp->ext_phy_option) {
	default:
	case SPEED_AUTONEG:
		tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			ADVERTISE_100HALF | ADVERTISE_100FULL);
		break;
	case SPEED10_HALF:
		tmp |= ADVERTISE_10HALF;
		break;
	case SPEED10_FULL:
		tmp |= ADVERTISE_10FULL;
		break;
	case SPEED100_HALF:
		tmp |= ADVERTISE_100HALF;
		break;
	case SPEED100_FULL:
		tmp |= ADVERTISE_100FULL;
		break;
	}

	if (advert != tmp)
		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);

	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
}

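/* Unmap and free all transmit and receive socket buffers. */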
static int amd8111e_free_skbs(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *rx_skbuff;
	int i;

	for (i = 0; i < NUM_TX_BUFFERS; i++) {
		if (lp->tx_skbuff[i]) {
			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
					 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
			lp->tx_dma_addr[i] = 0;
		}
	}

	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff != NULL) {
			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
					 lp->rx_buff_len - 2, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(lp->rx_skbuff[i]);
			lp->rx_skbuff[i] = NULL;
			lp->rx_dma_addr[i] = 0;
		}
	}

	return 0;
}

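/* Set the receive buffer length based on the MTU; jumbo frames need larger buffers. */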
static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int mtu = dev->mtu;

	if (mtu > ETH_DATA_LEN) {
		lp->rx_buff_len = mtu + ETH_HLEN + 10;
		lp->options |= OPTION_JUMBO_ENABLE;
	} else {
		lp->rx_buff_len = PKT_BUFF_SZ;
		lp->options &= ~OPTION_JUMBO_ENABLE;
	}
}

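/* Allocate (on first open) and initialize the transmit and receive descriptor rings. */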
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;

	if (lp->opened)
		amd8111e_free_skbs(dev);
	else {
		if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr)) == NULL)
			goto err_no_mem;

		if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr)) == NULL)
			goto err_free_tx_ring;
	}

	amd8111e_set_rx_buff_len(dev);

	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			for (--i; i >= 0; i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		skb_reserve(lp->rx_skbuff[i], 2);
	}

	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
			lp->rx_skbuff[i]->data, lp->rx_buff_len - 2,
			PCI_DMA_FROMDEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len - 2);
		wmb();
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:
	pci_free_consistent(lp->pci_dev,
		sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, lp->rx_ring,
		lp->rx_ring_dma_addr);

err_free_tx_ring:
	pci_free_consistent(lp->pci_dev,
		sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, lp->tx_ring,
		lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}

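/* Program the delayed-interrupt (coalescing) registers for the receive and transmit rings. */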
static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
{
	unsigned int timeout;
	unsigned int event_count;

	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;

	switch (cmod) {
	case RX_INTR_COAL:
		timeout = coal_conf->rx_timeout;
		event_count = coal_conf->rx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_A_R0 | (event_count << 16) | timeout,
		       mmio + DLY_INT_A);
		break;

	case TX_INTR_COAL:
		timeout = coal_conf->tx_timeout;
		event_count = coal_conf->tx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_B_T0 | (event_count << 16) | timeout,
		       mmio + DLY_INT_B);
		break;

	case DISABLE_COAL:
		writel(0, mmio + STVAL);
		writel(STINTEN, mmio + INTEN0);
		writel(0, mmio + DLY_INT_B);
		writel(0, mmio + DLY_INT_A);
		break;
	case ENABLE_COAL:
		writel((u32)SOFT_TIMER_FREQ, mmio + STVAL);
		writel(VAL0 | STINTEN, mmio + INTEN0);
		break;
	default:
		break;
	}
	return 0;
}

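/* Stop the chip, reinitialize the descriptor rings and restart the device with the current settings. */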
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	writel(RUN, mmio + CMD0);

	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	amd8111e_set_ext_phy(dev);

	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
	       APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
	       SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	writew((u32)DEFAULT_IPG, mmio + IPG);
	writew((u32)(DEFAULT_IPG - IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2 | JUMBO, mmio + CMD3);

		writel(REX_UFLO, mmio + CMD2);

		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
		       dev->name);
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	readl(mmio + CMD0);
	return 0;
}

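/* Clear the MAC registers back to their power-on defaults. */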
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;

	writel(RUN, mmio + CMD0);

	writew(0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	writel(0, mmio + RCV_RING_BASE_ADDR0);

	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	writel(CMD0_CLEAR, mmio + CMD0);

	writel(CMD2_CLEAR, mmio + CMD2);

	writel(CMD7_CLEAR, mmio + CMD7);

	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	writel(0x0, mmio + FLOW_CONTROL);

	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	writel(0x0, mmio + STVAL);

	writel(INTEN0_CLEAR, mmio + INTEN0);

	writel(0x0, mmio + LADRF);

	writel(0x80010, mmio + SRAM_SIZE);

	writel(0x0, mmio + RCV_RING_LEN0);

	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);

	writel(0x0, mmio + XMT_RING_LIMIT);

	writew(MIB_CLEAR, mmio + MIB_ADDR);

	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif

	writel(CTRL1_DEFAULT, mmio + CTRL1);

	readl(mmio + CMD2);
}

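/* Disable interrupt generation and acknowledge any pending interrupts. */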
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 intr0;

	writel(INTREN, lp->mmio + CMD0);

	intr0 = readl(lp->mmio + INT0);
	writel(intr0, lp->mmio + INT0);

	readl(lp->mmio + INT0);
}

static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
	writel(RUN, lp->mmio + CMD0);

	readl(lp->mmio + CMD0);
}

static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
	if (lp->rx_ring) {
		pci_free_consistent(lp->pci_dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		pci_free_consistent(lp->pci_dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}
}

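/* Reclaim completed transmit descriptors, free their skbs and wake the queue if it was stopped. */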
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
	int status;

	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		if (lp->tx_skbuff[tx_index]) {
			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;

		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		if (netif_queue_stopped(dev) &&
		    lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			netif_wake_queue(dev);
		}
	}
	return 0;
}

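/* NAPI poll handler: receive packets up to the budget, refill the ring and re-enable receive interrupts when done. */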
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	unsigned int intr0;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif
	int rx_pkt_limit = budget;
	unsigned long flags;

	do {
		while (1) {
			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
			if (status & OWN_BIT)
				break;

			if (status & ERR_BIT) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}

			if (!((status & STP_BIT) && (status & ENP_BIT))) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}
			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
			vtag = status & TT_MASK;

			if (vtag != 0)
				min_pkt_len = MIN_PKT_LEN - 4;
			else
#endif
				min_pkt_len = MIN_PKT_LEN;

			if (pkt_len < min_pkt_len) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}
			if (--rx_pkt_limit < 0)
				goto rx_not_empty;
			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
			if (!new_skb) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}

			skb_reserve(new_skb, 2);
			skb = lp->rx_skbuff[rx_index];
			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[rx_index],
					 lp->rx_buff_len - 2, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
			lp->rx_skbuff[rx_index] = new_skb;
			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
								   new_skb->data,
								   lp->rx_buff_len - 2,
								   PCI_DMA_FROMDEVICE);

			skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
			if (vtag == TT_VLAN_TAGGED) {
				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
				__vlan_hwaccel_put_tag(skb, vlan_tag);
			}
#endif
			netif_receive_skb(skb);

			lp->coal_conf.rx_packets++;
			lp->coal_conf.rx_bytes += pkt_len;
			num_rx_pkt++;

err_next_pkt:
			lp->rx_ring[rx_index].buff_phy_addr
				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
			lp->rx_ring[rx_index].buff_count =
				cpu_to_le16(lp->rx_buff_len - 2);
			wmb();
			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
		}

		intr0 = readl(mmio + INT0);

		writel(intr0 & RINT0, mmio + INT0);

	} while (intr0 & RINT0);

	if (rx_pkt_limit > 0) {
		spin_lock_irqsave(&lp->lock, flags);
		__napi_complete(napi);
		writel(VAL0 | RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

rx_not_empty:
	return num_rx_pkt;
}

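/* Read the link state from STAT0 and update speed, duplex and carrier status. */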
static int amd8111e_link_change(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int status0, speed;

	status0 = readl(lp->mmio + STAT0);

	if (status0 & LINK_STATS) {
		if (status0 & AUTONEG_COMPLETE)
			lp->link_config.autoneg = AUTONEG_ENABLE;
		else
			lp->link_config.autoneg = AUTONEG_DISABLE;

		if (status0 & FULL_DPLX)
			lp->link_config.duplex = DUPLEX_FULL;
		else
			lp->link_config.duplex = DUPLEX_HALF;
		speed = (status0 & SPEED_MASK) >> 7;
		if (speed == PHY_SPEED_10)
			lp->link_config.speed = SPEED_10;
		else if (speed == PHY_SPEED_100)
			lp->link_config.speed = SPEED_100;

		printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",
		       dev->name,
		       (lp->link_config.speed == SPEED_100) ? "100" : "10",
		       (lp->link_config.duplex == DUPLEX_FULL) ? "Full" : "Half");
		netif_carrier_on(dev);
	} else {
		lp->link_config.speed = SPEED_INVALID;
		lp->link_config.duplex = DUPLEX_INVALID;
		lp->link_config.autoneg = AUTONEG_INVALID;
		printk(KERN_INFO "%s: Link is Down.\n", dev->name);
		netif_carrier_off(dev);
	}

	return 0;
}

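/* Read one MIB counter through the controller's MIB data port. */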
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int status;
	unsigned int data;
	unsigned int repeat = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		status = readw(mmio + MIB_ADDR);
		udelay(2);
	} while (--repeat && (status & MIB_CMD_ACTIVE));

	data = readl(mmio + MIB_DATA);
	return data;
}

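/* Gather the interface statistics from the hardware MIB counters. */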
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	if (!lp->opened)
		return new_stats;
	spin_lock_irqsave(&lp->lock, flags);

	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts) +
				amd8111e_read_mib(mmio, rcv_multicast_pkts) +
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts) +
			       amd8111e_read_mib(mmio, rcv_fragments) +
			       amd8111e_read_mib(mmio, rcv_jabbers) +
			       amd8111e_read_mib(mmio, rcv_alignment_errors) +
			       amd8111e_read_mib(mmio, rcv_fcs_errors) +
			       amd8111e_read_mib(mmio, rcv_miss_pkts) +
			       lp->drv_rx_errors;

	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts) +
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}

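/* Called on each soft-timer interrupt: recompute the RX/TX packet and byte rates and pick a matching coalescing level. */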
static int amd8111e_calc_coalesce(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
	int tx_pkt_rate;
	int rx_pkt_rate;
	int tx_data_rate;
	int rx_data_rate;
	int rx_pkt_size;
	int tx_pkt_size;

	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
	coal_conf->tx_prev_packets = coal_conf->tx_packets;

	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
	coal_conf->tx_prev_bytes = coal_conf->tx_bytes;

	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
	coal_conf->rx_prev_packets = coal_conf->rx_packets;

	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
	coal_conf->rx_prev_bytes = coal_conf->rx_bytes;

	if (rx_pkt_rate < 800) {
		if (coal_conf->rx_coal_type != NO_COALESCE) {
			coal_conf->rx_timeout = 0x0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev, RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
		}
	} else {
		rx_pkt_size = rx_data_rate / rx_pkt_rate;
		if (rx_pkt_size < 128) {
			if (coal_conf->rx_coal_type != NO_COALESCE) {
				coal_conf->rx_timeout = 0;
				coal_conf->rx_event_count = 0;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = NO_COALESCE;
			}
		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
			if (coal_conf->rx_coal_type != LOW_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = LOW_COALESCE;
			}
		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = MEDIUM_COALESCE;
			}
		} else if (rx_pkt_size >= 1024) {
			if (coal_conf->rx_coal_type != HIGH_COALESCE) {
				coal_conf->rx_timeout = 2;
				coal_conf->rx_event_count = 3;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = HIGH_COALESCE;
			}
		}
	}

	if (tx_pkt_rate < 800) {
		if (coal_conf->tx_coal_type != NO_COALESCE) {
			coal_conf->tx_timeout = 0x0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev, TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
		}
	} else {
		tx_pkt_size = tx_data_rate / tx_pkt_rate;
		if (tx_pkt_size < 128) {
			if (coal_conf->tx_coal_type != NO_COALESCE) {
				coal_conf->tx_timeout = 0;
				coal_conf->tx_event_count = 0;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = NO_COALESCE;
			}
		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
			if (coal_conf->tx_coal_type != LOW_COALESCE) {
				coal_conf->tx_timeout = 1;
				coal_conf->tx_event_count = 2;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = LOW_COALESCE;
			}
		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
				coal_conf->tx_timeout = 2;
				coal_conf->tx_event_count = 5;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = MEDIUM_COALESCE;
			}
		} else if (tx_pkt_size >= 1024) {
			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
				coal_conf->tx_timeout = 4;
				coal_conf->tx_event_count = 8;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = HIGH_COALESCE;
			}
		}
	}
	return 0;
}

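/* Interrupt handler: acknowledge the cause bits, schedule NAPI for receive, reap transmits and handle link and timer events. */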
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned int intr0, intren0;
	unsigned int handled = 1;

	if (unlikely(dev == NULL))
		return IRQ_NONE;

	spin_lock(&lp->lock);

	writel(INTREN, mmio + CMD0);

	intr0 = readl(mmio + INT0);
	intren0 = readl(mmio + INTEN0);

	if (!(intr0 & INTR)) {
		handled = 0;
		goto err_no_interrupt;
	}

	writel(intr0, mmio + INT0);

	if (intr0 & RINT0) {
		if (napi_schedule_prep(&lp->napi)) {
			writel(RINTEN0, mmio + INTEN0);

			__napi_schedule(&lp->napi);
		} else if (intren0 & RINTEN0) {
			printk("************Driver bug! interrupt while in poll\n");

			writel(RINTEN0, mmio + INTEN0);
		}
	}

	if (intr0 & TINT0)
		amd8111e_tx(dev);

	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	writel(VAL0 | INTREN, mmio + CMD0);

	spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;
	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
#endif

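/* ndo_stop: stop the queue and NAPI, quiesce the chip and free the buffers and rings. */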
static int amd8111e_close(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	netif_stop_queue(dev);

	napi_disable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);

	amd8111e_free_skbs(lp->amd8111e_net_dev);

	netif_carrier_off(lp->amd8111e_net_dev);

	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);
	amd8111e_free_ring(lp);

	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}

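/* ndo_open: request the IRQ, bring up the hardware and start the transmit queue. */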
static int amd8111e_open(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
					 IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if (amd8111e_restart(dev)) {
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}

	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		add_timer(&lp->ipg_data.ipg_timer);
		printk(KERN_INFO "%s: Dynamic IPG Enabled.\n", dev->name);
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}

static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
{
	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
	if (lp->tx_skbuff[tx_index])
		return -1;
	else
		return 0;
}

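/* ndo_start_xmit: map the skb into a transmit descriptor, hand ownership to the hardware and trigger the transmit DMA. */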
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	if (vlan_tx_tag_present(skb)) {
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
			cpu_to_le16(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
			cpu_to_le16(vlan_tx_tag_get(skb));
	}
#endif
	lp->tx_dma_addr[tx_index] =
		pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
		cpu_to_le32(lp->tx_dma_addr[tx_index]);

	wmb();
	lp->tx_ring[tx_index].tx_flags |=
		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);

	lp->tx_idx++;

	writel(VAL1 | TDMD0, lp->mmio + CMD0);
	writel(VAL2 | RDMD0, lp->mmio + CMD0);

	if (amd8111e_tx_queue_avail(lp) < 0) {
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}

static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
	void __iomem *mmio = lp->mmio;

	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
	buf[1] = readl(mmio + XMT_RING_LEN0);
	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
	buf[3] = readl(mmio + RCV_RING_LEN0);
	buf[4] = readl(mmio + CMD0);
	buf[5] = readl(mmio + CMD2);
	buf[6] = readl(mmio + CMD3);
	buf[7] = readl(mmio + CMD7);
	buf[8] = readl(mmio + INT0);
	buf[9] = readl(mmio + INTEN0);
	buf[10] = readl(mmio + LADRF);
	buf[11] = readl(mmio + LADRF + 4);
	buf[12] = readl(mmio + STAT0);
}

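/* Configure promiscuous mode or build the 64-bit logical address (multicast hash) filter. */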
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2];
	int bit_num;

	if (dev->flags & IFF_PROMISC) {
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	} else
		writel(PROM, lp->mmio + CMD2);
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

		writel(PROM, lp->mmio + CMD2);
		return;
	}

	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	readl(lp->mmio + CMD2);
}

static void amd8111e_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;
	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, MODULE_VERS, sizeof(info->version));
	snprintf(info->fw_version, sizeof(info->fw_version),
		 "%u", chip_version);
	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}

static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}

static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	regs->version = 0;
	amd8111e_read_regs(lp, buf);
}

static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	spin_lock_irq(&lp->lock);
	mii_ethtool_gset(&lp->mii_if, ecmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}

static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int res;
	spin_lock_irq(&lp->lock);
	res = mii_ethtool_sset(&lp->mii_if, ecmd);
	spin_unlock_irq(&lp->lock);
	return res;
}

static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_nway_restart(&lp->mii_if);
}

static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_link_ok(&lp->mii_if);
}

static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	wol_info->supported = WAKE_MAGIC | WAKE_PHY;
	if (lp->options & OPTION_WOL_ENABLE)
		wol_info->wolopts = WAKE_MAGIC;
}

static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	if (wol_info->wolopts & ~(WAKE_MAGIC | WAKE_PHY))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	if (wol_info->wolopts & WAKE_MAGIC)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
	else if (wol_info->wolopts & WAKE_PHY)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
	else
		lp->options &= ~OPTION_WOL_ENABLE;
	spin_unlock_irq(&lp->lock);
	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.get_settings = amd8111e_get_settings,
	.set_settings = amd8111e_set_settings,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
};

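/* ndo_do_ioctl: handle the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG). */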
static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;
	u32 mii_regval;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = lp->ext_phy_addr;

		/* fall through */
	case SIOCGMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}

static int amd8111e_set_mac_address(struct net_device *dev, void *p)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	spin_lock_irq(&lp->lock);

	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], lp->mmio + PADR + i);

	spin_unlock_irq(&lp->lock);

	return 0;
}

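/* ndo_change_mtu: validate the new MTU and restart the chip so the receive buffers are resized. */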
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&lp->lock);

	writel(RUN, lp->mmio + CMD0);

	dev->mtu = new_mtu;

	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_start_queue(dev);
	return err;
}

static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	readl(lp->mmio + CMD7);
	return 0;
}

static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	readl(lp->mmio + CMD7);
	return 0;
}

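/* ndo_tx_timeout: the transmitter appears stuck, so restart the chip and wake the queue. */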
static void amd8111e_tx_timeout(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	printk(KERN_ERR "%s: transmit timed out, resetting\n",
	       dev->name);
	spin_lock_irq(&lp->lock);
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_wake_queue(dev);
}

static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	spin_lock_irq(&lp->lock);
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		pci_enable_wake(pci_dev, PCI_D3hot, 1);
		pci_enable_wake(pci_dev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pci_dev, PCI_D3hot, 0);
		pci_enable_wake(pci_dev, PCI_D3cold, 0);
	}

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D3hot);

	return 0;
}

static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);

	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
			  jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}

static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	if (dev) {
		unregister_netdev(dev);
		iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

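/* Dynamic IPG timer callback: in half duplex, step through inter-packet gap values and keep the one with the fewest collisions. */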
static void amd8111e_config_ipg(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if (lp->link_config.duplex == DUPLEX_FULL) {
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {
		if (ipg_data->timer_tick == IPG_STABLE_TIME) {
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		} else
			ipg_data->timer_tick++;
	}

	if (ipg_data->ipg_state == CSTATE) {
		total_col_cnt = ipg_data->col_cnt =
			amd8111e_read_mib(mmio, xmt_collisions);

		if ((total_col_cnt - prev_col_cnt) <
		    (ipg_data->diff_col_cnt)) {
			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else {
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;
}

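/* Scan the MII bus for an external PHY; fall back to address 1 if none responds. */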
static void __devinit amd8111e_probe_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	for (i = 0x1e; i >= 0; i--) {
		u32 id1, id2;

		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
			continue;
		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
			continue;
		lp->ext_phy_id = (id1 << 16) | id2;
		lp->ext_phy_addr = i;
		return;
	}
	lp->ext_phy_id = 0;
	lp->ext_phy_addr = 1;
}

static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_do_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= amd8111e_poll,
#endif
};

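/* PCI probe: enable the device, map its registers, read the MAC address, set up the net_device and register it. */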
static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	int err, i, pm_cap;
	unsigned long reg_addr, reg_len;
	struct amd8111e_priv *lp;
	struct net_device *dev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
		       "exiting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
		       "exiting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, MODULE_NAME);
	if (err) {
		printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
		       "exiting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if ((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM)) == 0) {
		printk(KERN_ERR "amd8111e: No Power Management capability, "
		       "exiting.\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
		printk(KERN_ERR "amd8111e: DMA not supported, "
		       "exiting.\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	reg_addr = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_free_reg;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	lp = netdev_priv(dev);
	lp->pci_dev = pdev;
	lp->amd8111e_net_dev = dev;
	lp->pm_cap = pm_cap;

	spin_lock_init(&lp->lock);

	lp->mmio = ioremap(reg_addr, reg_len);
	if (!lp->mmio) {
		printk(KERN_ERR "amd8111e: Cannot map device registers, "
		       "exiting\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = readb(lp->mmio + PADR + i);

	lp->ext_phy_option = speed_duplex[card_idx];
	if (coalesce[card_idx])
		lp->options |= OPTION_INTR_COAL_ENABLE;
	if (dynamic_ipg[card_idx++])
		lp->options |= OPTION_DYN_IPG_ENABLE;

	dev->netdev_ops = &amd8111e_netdev_ops;
	SET_ETHTOOL_OPS(dev, &ops);
	dev->irq = pdev->irq;
	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	amd8111e_probe_ext_phy(dev);

	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = amd8111e_mdio_read;
	lp->mii_if.mdio_write = amd8111e_mdio_write;
	lp->mii_if.phy_id = lp->ext_phy_addr;

	amd8111e_set_rx_buff_len(dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR "amd8111e: Cannot register net device, "
		       "exiting.\n");
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		init_timer(&lp->ipg_data.ipg_timer);
		lp->ipg_data.ipg_timer.data = (unsigned long)dev;
		lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
		lp->ipg_data.ipg_timer.expires = jiffies +
			IPG_CONVERGE_JIFFIES;
		lp->ipg_data.ipg = DEFAULT_IPG;
		lp->ipg_data.ipg_state = CSTATE;
	}

	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
	printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
	       dev->name, MODULE_VERS);
	printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
	       dev->name, chip_version, dev->dev_addr);
	if (lp->ext_phy_id)
		printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
		       dev->name, lp->ext_phy_id, lp->ext_phy_addr);
	else
		printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
		       dev->name);
	return 0;

err_iounmap:
	iounmap(lp->mmio);

err_free_dev:
	free_netdev(dev);

err_free_reg:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= __devexit_p(amd8111e_remove_one),
	.suspend	= amd8111e_suspend,
	.resume		= amd8111e_resume
};

static int __init amd8111e_init(void)
{
	return pci_register_driver(&amd8111e_driver);
}

static void __exit amd8111e_cleanup(void)
{
	pci_unregister_driver(&amd8111e_driver);
}

module_init(amd8111e_init);
module_exit(amd8111e_cleanup);