/*
 * ipg.c: Device Driver for the IC Plus IP1000 Gigabit Ethernet Adapter.
 *
 * Supports the Sundance/Tamarack TC9020/9021 based NICs listed in
 * ipg_pci_tbl below.
 */

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>

#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
	 IPG_AC_AUTO_INIT)

#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)		ioread32(ioaddr + (reg))
#define ipg_r16(reg)		ioread16(ioaddr + (reg))
#define ipg_r8(reg)		ioread8(ioaddr + (reg))

enum {
	netdev_io_size = 128
};

#include "ipg.h"
#define DRV_NAME	"ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
MODULE_LICENSE("GPL");

#define IPG_MAX_RXFRAME_SIZE	0x0600
#define IPG_RXFRAG_SIZE		0x0600
#define IPG_RXSUPPORT_SIZE	0x0600
#define IPG_IS_JUMBO		false

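/*
 * Per-revision PHY parameter records, applied by ipg_set_phy_default_param().
 * Each record starts with a header word of (chip revision << 8) | length in
 * bytes, followed by (PHY register, value) pairs; a header of 0x0000
 * terminates the list. Both records below carry seven register/value pairs
 * (07 * 4 bytes).
 */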
static unsigned short DefaultPhyParam[] = {
	/* Record for chip revision 0x40. */
	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
	31, 0x0000, 30, 0x005e, 9, 0x0700,
	/* Record for chip revision 0x41: identical pairs. */
	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
	31, 0x0000, 30, 0x005e, 9, 0x0700,
	0x0000
};

static const char *ipg_brand_name[] = {
	"IC PLUS IP1000 1000/100/10 based NIC",
	"Sundance Technology ST2021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"D-Link NIC IP1000A"
};

static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
	{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
	{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
	{ PCI_VDEVICE(DLINK, 0x9021), 2 },
	{ PCI_VDEVICE(DLINK, 0x4020), 3 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);

static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	return sp->ioaddr;
}

#ifdef IPG_DEBUG
static void ipg_dump_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_rfdlist\n");

	printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
	printk(KERN_INFO "rx_dirty   = %2.2x\n", sp->rx_dirty);
	printk(KERN_INFO "RFDList start address = %16.16lx\n",
	       (unsigned long) sp->rxd_map);
	printk(KERN_INFO "RFDListPtr register   = %8.8x%8.8x\n",
	       ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		/* Compute field offsets as pointer differences so they are
		 * not truncated on 64 bit systems, and print the
		 * little-endian descriptor fields via le64_to_cpu().
		 */
		offset = (unsigned long) &sp->rxd[i].next_desc -
			 (unsigned long) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->rxd[i].next_desc));
		offset = (unsigned long) &sp->rxd[i].rfs -
			 (unsigned long) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFS        = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->rxd[i].rfs));
		offset = (unsigned long) &sp->rxd[i].frag_info -
			 (unsigned long) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->rxd[i].frag_info));
	}
}

static void ipg_dump_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_tfdlist\n");

	printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
	printk(KERN_INFO "tx_dirty   = %2.2x\n", sp->tx_dirty);
	printk(KERN_INFO "TFDList start address = %16.16lx\n",
	       (unsigned long) sp->txd_map);
	printk(KERN_INFO "TFDListPtr register   = %8.8x%8.8x\n",
	       ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		offset = (unsigned long) &sp->txd[i].next_desc -
			 (unsigned long) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->txd[i].next_desc));
		offset = (unsigned long) &sp->txd[i].tfc -
			 (unsigned long) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFC        = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->txd[i].tfc));
		offset = (unsigned long) &sp->txd[i].frag_info -
			 (unsigned long) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16llx\n", i,
		       offset, (unsigned long long)
		       le64_to_cpu(sp->txd[i].frag_info));
	}
}
#endif

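/*
 * The PHY is reached by bit-banging its MII management pins through the
 * PHY_CTRL register: ipg_write_phy_ctl() writes one control byte and waits
 * IPG_PC_PHYCTRLWAIT_NS, and ipg_drive_phy_ctl_low_high() clocks the same
 * data out with MDC low, then high.
 */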
static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}

static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}

static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}

static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}

static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	u16 bit_data;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

	return bit_data;
}

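/*
 * Read a PHY register through the bit-banged management interface. The p[]
 * table below encodes a standard MII read frame: a 32 bit preamble, start
 * and read opcodes, the 5 bit PHY and register addresses, a 2 bit
 * turnaround, 16 data bits and one idle bit. Only the first five fields are
 * driven; the turnaround and data phases are sampled from the PHY.
 */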
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_READ,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0000,		2  },	/* TA */
		{ 0x0000,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Drive the preamble, ST, OP, PHYAD and REGAD fields. */
	for (j = 0; j < 5; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* Shift the next bit of the field into the
			 * MGMTDATA position (bit 1) of PHY_CTRL.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	send_three_state(ioaddr, polarity);

	read_phy_bit(ioaddr, polarity);

	/* Read in the 16 data bits, MSB first. */
	for (i = 0; i < p[6].len; i++) {
		p[6].field |=
		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
	}

	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_end(ioaddr, polarity);

	return p[6].field;
}

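/*
 * Write a PHY register through the same bit-banged interface. The frame
 * matches the read frame above except for the write opcode, the driven
 * turnaround bits (0b10) and the 16 driven data bits: the first seven
 * fields are clocked out and only the trailing idle bit is sampled.
 */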
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_WRITE,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0002,		2  },	/* TA */
		{ val & 0xffff,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Drive the preamble, ST, OP, PHYAD, REGAD, TA and DATA fields. */
	for (j = 0; j < 7; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* Shift the next bit of the field into the
			 * MGMTDATA position (bit 1) of PHY_CTRL.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	/* The last cycle is a tri-state, so read from the PHY. */
	for (j = 7; j < 8; j++) {
		for (i = 0; i < p[j].len; i++) {
			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

			p[j].field |= ((ipg_r8(PHY_CTRL) &
				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
		}
	}
}

static void ipg_set_led_mode(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u32 mode;

	mode = ipg_r32(ASIC_CTRL);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if ((sp->led_mode & 0x03) > 1)
		mode |= IPG_AC_LED_MODE_BIT_1;

	if ((sp->led_mode & 0x01) == 1)
		mode |= IPG_AC_LED_MODE;

	if ((sp->led_mode & 0x08) == 8)
		mode |= IPG_AC_LED_SPEED;

	ipg_w32(mode, ASIC_CTRL);
}

static void ipg_set_phy_set(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	int physet;

	physet = ipg_r8(PHY_SET);
	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
	physet |= ((sp->led_mode & 0x70) >> 4);
	ipg_w8(physet, PHY_SET);
}

static int ipg_reset(struct net_device *dev, u32 resetflags)
{
	/* Assert the functional resets selected by resetflags via the
	 * ASIC_CTRL register, then poll until the reset completes.
	 */
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int timeout_count = 0;

	IPG_DEBUG_MSG("_reset\n");

	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

	/* Give the hardware time to start the reset before polling. */
	mdelay(IPG_AC_RESETWAIT);

	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
		mdelay(IPG_AC_RESETWAIT);
		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
			return -ETIME;
	}

	/* Set LED mode in ASIC_CTRL. */
	ipg_set_led_mode(dev);

	/* Set the PHY_SET register value. */
	ipg_set_phy_set(dev);
	return 0;
}

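/* Find the PHY address, starting at the address suggested by
 * IPG_NIC_PHY_ADDRESS and wrapping through all 32 MDIO addresses; an
 * address whose BMSR reads as neither all ones nor all zeroes is taken to
 * have a PHY behind it. 0x1f is returned if no PHY responds.
 */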
static int ipg_find_phyaddr(struct net_device *dev)
{
	unsigned int phyaddr, i;

	for (i = 0; i < 32; i++) {
		u32 status;

		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

		status = mdio_read(dev, phyaddr, MII_BMSR);

		if ((status != 0xFFFF) && (status != 0))
			return phyaddr;
	}

	return 0x1f;
}

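/*
 * Configure the MAC to match the link parameters the PHY auto-negotiated:
 * latch the 10Mbps flag used by the transmit path, then program duplex and
 * TX/RX flow control in MAC_CTRL. Flow control is only enabled on
 * full-duplex links.
 */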
static int ipg_config_autoneg(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int txflowcontrol;
	unsigned int rxflowcontrol;
	unsigned int fullduplex;
	u32 mac_ctrl_val;
	u32 asicctrl;
	u8 phyctrl;

	IPG_DEBUG_MSG("_config_autoneg\n");

	asicctrl = ipg_r32(ASIC_CTRL);
	phyctrl = ipg_r8(PHY_CTRL);
	mac_ctrl_val = ipg_r32(MAC_CTRL);

	/* Assume half duplex and no flow control until the PHY status
	 * says otherwise.
	 */
	fullduplex = 0;
	txflowcontrol = 0;
	rxflowcontrol = 0;

	/* Set a global flag if the PHY is running in 10Mbps mode; the
	 * transmit path needs special handling at that speed.
	 */
	sp->tenmbpsmode = 0;

	printk(KERN_INFO "%s: Link speed = ", dev->name);

	/* Determine actual speed of operation. */
	switch (phyctrl & IPG_PC_LINK_SPEED) {
	case IPG_PC_LINK_SPEED_10MBPS:
		printk("10Mbps.\n");
		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
		       dev->name);
		sp->tenmbpsmode = 1;
		break;
	case IPG_PC_LINK_SPEED_100MBPS:
		printk("100Mbps.\n");
		break;
	case IPG_PC_LINK_SPEED_1000MBPS:
		printk("1000Mbps.\n");
		break;
	default:
		printk("undefined!\n");
		return 0;
	}

	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
		fullduplex = 1;
		txflowcontrol = 1;
		rxflowcontrol = 1;
	}

	/* Configure duplex and flow control. */
	if (fullduplex == 1) {
		printk(KERN_INFO "%s: setting full duplex, ", dev->name);

		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

		if (txflowcontrol == 1) {
			printk("TX flow control");
			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
		} else {
			printk("no TX flow control");
			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
		}

		if (rxflowcontrol == 1) {
			printk(", RX flow control.");
			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
		} else {
			printk(", no RX flow control.");
			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
		}

		printk("\n");
	} else {
		printk(KERN_INFO "%s: setting half duplex, "
		       "no TX flow control, no RX flow control.\n", dev->name);

		mac_ctrl_val &= ~(IPG_MC_DUPLEX_SELECT_FD |
				  IPG_MC_TX_FLOW_CONTROL_ENABLE |
				  IPG_MC_RX_FLOW_CONTROL_ENABLE);
	}
	ipg_w32(mac_ctrl_val, MAC_CTRL);
	return 0;
}

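/*
 * Program the receive mode and the multicast hash table. The hash index of
 * a multicast address is the low order six bits of the little-endian CRC-32
 * of the address, selecting one bit of the 64 bit table held in
 * HASHTABLE_0/1. Promiscuous and all-multicast modes bypass the table.
 */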
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct netdev_hw_addr *ha;
	unsigned int hashindex;
	u32 hashtable[2];
	u8 receivemode;

	IPG_DEBUG_MSG("_nic_set_multicast_list\n");

	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (dev->flags & IFF_PROMISC) {
		/* NIC to be configured in promiscuous mode. */
		receivemode = IPG_RM_RECEIVEALLFRAMES;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   ((dev->flags & IFF_MULTICAST) &&
		    (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
		/* NIC to be configured to receive all multicast frames. */
		receivemode |= IPG_RM_RECEIVEMULTICAST;
	} else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		/* NIC to be configured to receive selected multicast
		 * addresses.
		 */
		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
	}

	/* Calculate the bits to set for the 64 bit, IPG hash table. */
	hashtable[0] = 0x00000000;
	hashtable[1] = 0x00000000;

	/* Cycle through all multicast addresses to filter. */
	netdev_for_each_mc_addr(ha, dev) {
		/* Calculate the CRC result for each multicast address. */
		hashindex = crc32_le(0xffffffff, ha->addr,
				     ETH_ALEN);

		/* Use only the least significant 6 bits. */
		hashindex = hashindex & 0x3F;

		/* Within "hashtable", set the bit number "hashindex"
		 * (a value between 0 and 63).
		 */
		set_bit(hashindex, (void *)hashtable);
	}

	/* Write the hash table to the two 32 bit HASHTABLE registers. */
	ipg_w32(hashtable[0], HASHTABLE_0);
	ipg_w32(hashtable[1], HASHTABLE_1);

	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}

static int ipg_io_config(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = ipg_ioaddr(dev);
	u32 origmacctrl;
	u32 restoremacctrl;

	IPG_DEBUG_MSG("_io_config\n");

	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by the MAC.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are enabled so
	 * MAC_CTRL may be restored correctly afterwards.
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set the RECEIVE_MODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);

	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* Undocumented DEBUG_CTRL bits carried over from the vendor
	 * driver; they appear to be silicon errata workarounds.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MAC_CTRL to its original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}

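/*
 * Allocate and map a receive buffer for one receive frame descriptor (RFD).
 * The DMA address and the fragment length (in bits 48-63) are packed into
 * the 64 bit frag_info field of the descriptor.
 */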
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;
	struct sk_buff *skb;
	u64 rxfragsize;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
	if (!skb) {
		sp->rx_buff[entry] = NULL;
		return -ENOMEM;
	}

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->rx_buff[entry] = skb;

	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length. */
	rxfragsize = sp->rxfrag_size;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

	return 0;
}

static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		if (sp->rx_buff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/* A receive buffer was not ready; break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
				       " for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
					      sizeof(struct ipg_rx)*(i + 1));
	}
	/* Close the ring: the last RFD points back to the first. */
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFD list to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}

static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->tx_buff[i]) {
			dev_kfree_skb_irq(sp->tx_buff[i]);
			sp->tx_buff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
					      sizeof(struct ipg_tx)*(i + 1));
	}
	/* Close the ring: the last TFD points back to the first. */
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFD list to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->reset_current_tfd = 1;
}

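/*
 * Free the transmit buffers of every TFD the hardware has marked done,
 * walking from tx_dirty towards tx_current, and wake the transmit queue if
 * ring space was reclaimed.
 */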
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->tx_buff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning at the last
		 * freed TFD, and stop when the hardware has not yet set
		 * the TFDDone bit.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer. */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(skb);

			sp->tx_buff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}

static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}

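/*
 * For TxComplete interrupts, process the TX_STATUS register: count errors,
 * re-enable the transmitter after late or excessive collisions, and reset
 * the transmit side entirely after a FIFO underrun. Finally reap any
 * completed TFDs.
 */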
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_txcleanup\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		/* Reading the TX_STATUS register clears the
		 * TX_COMPLETE interrupt.
		 */
		u32 txstatusdword = ipg_r32(TX_STATUS);

		IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

		/* Check for Transmit Complete; stop when not set. */
		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
			break;

		/* If in 10Mbps mode, indicate transmit is ready. */
		if (sp->tenmbpsmode) {
			netif_wake_queue(dev);
		}

		/* Transmit error, increment stat counters. */
		if (txstatusdword & IPG_TS_TX_ERROR) {
			IPG_DEBUG_MSG("Transmit error.\n");
			sp->stats.tx_errors++;
		}

		/* Late collision, re-enable transmitter. */
		if (txstatusdword & IPG_TS_LATE_COLLISION) {
			IPG_DEBUG_MSG("Late collision on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Maximum collisions, re-enable transmitter. */
		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
			IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Transmit underrun: reset and re-configure the
		 * transmitter.
		 */
		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
			IPG_DEBUG_MSG("Transmitter underrun.\n");
			sp->stats.tx_fifo_errors++;
			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
				  IPG_AC_NETWORK | IPG_AC_FIFO);

			/* Re-configure after DMA reset. */
			if (ipg_io_config(dev) < 0) {
				printk(KERN_INFO
				       "%s: Error during re-configuration.\n",
				       dev->name);
			}
			init_tfdlist(dev);

			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}
	}

	ipg_nic_txfree(dev);
}

/* Provides statistical information about the IPG NIC. */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u32 temp1;
	u16 temp2;

	IPG_DEBUG_MSG("_nic_get_stats\n");

	/* Check to see if the NIC has been initialized via nic_open,
	 * before trying to read statistic registers.
	 */
	if (!test_bit(__LINK_STATE_START, &dev->state))
		return &sp->stats;

	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
	sp->stats.rx_errors += temp1;
	sp->stats.rx_missed_errors += temp1;
	/* temp1 is a u32: the sum of three 32 bit counters must not be
	 * truncated to 16 bits.
	 */
	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
		ipg_r32(IPG_LATECOLLISIONS);
	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
	sp->stats.collisions += temp1;
	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

	/* detailed tx_errors */
	sp->stats.tx_carrier_errors += temp2;

	/* detailed rx_errors */
	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
		ipg_r16(IPG_FRAMETOOLONGERRRORS);
	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

	/* Unutilized IPG statistic registers. */
	ipg_r32(IPG_MCSTFRAMESRCVDOK);

	return &sp->stats;
}

/* Restore used receive buffers. */
static int ipg_nic_rxrestore(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	const unsigned int curr = sp->rx_current;
	unsigned int dirty = sp->rx_dirty;

	IPG_DEBUG_MSG("_nic_rxrestore\n");

	for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
		unsigned int entry = dirty % IPG_RFDLIST_LENGTH;

		/* Skip entries that still own a buffer. */
		if (sp->rx_buff[entry])
			continue;

		/* Generate a new receive buffer to replace the
		 * current buffer (which will be released by the
		 * Linux system).
		 */
		if (ipg_get_rxbuff(dev, entry) < 0) {
			IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");

			break;
		}

		/* Reset the RFS field. */
		sp->rxd[entry].rfs = 0x0000000000000000;
	}
	sp->rx_dirty = dirty;

	return 0;
}

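/*
 * Jumbo frames larger than one fragment arrive spread over several RFDs.
 * The frame-type values below are chosen so that FRAMESTART adds 1 and
 * FRAMEEND adds 10, letting ipg_nic_rx_check_frame_type() classify a
 * descriptor by simple addition.
 */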
enum {
	NORMAL_PACKET,
	ERROR_PACKET
};

enum {
	FRAME_NO_START_NO_END	= 0,
	FRAME_WITH_START	= 1,
	FRAME_WITH_END		= 10,
	FRAME_WITH_START_WITH_END = 11
};

static void ipg_nic_rx_free_skb(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

	if (sp->rx_buff[entry]) {
		struct ipg_rx *rxfd = sp->rxd + entry;

		pci_unmap_single(sp->pdev,
			le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(sp->rx_buff[entry]);
		sp->rx_buff[entry] = NULL;
	}
}

static int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
	int type = FRAME_NO_START_NO_END;

	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
		type += FRAME_WITH_START;
	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
		type += FRAME_WITH_END;
	return type;
}

static int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred.\n");
			sp->stats.rx_length_errors++;
		}

		/* No action for oversized or FCS errors here; those are
		 * presumably tallied by the hardware statistic registers
		 * read in ipg_nic_get_stats().
		 */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred.\n");
			sp->stats.rx_frame_errors++;
		}

		/* Free the memory associated with the RX buffer since
		 * it is erroneous and we will not pass it to higher
		 * layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			dev_kfree_skb_irq(sp->rx_buff[entry]);
			sp->rx_buff[entry] = NULL;
		}
		return ERROR_PACKET;
	}
	return NORMAL_PACKET;
}

static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
					  struct ipg_nic_private *sp,
					  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct sk_buff *skb;
	int framelen;

	/* A complete frame arrived while a jumbo frame was still open:
	 * drop the partially assembled frame.
	 */
	if (jumbo->found_start) {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}

	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	/* Accept this frame and send it to the upper layer. */
	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
	if (framelen > sp->rxfrag_size)
		framelen = sp->rxfrag_size;

	skb_put(skb, framelen);
	skb->protocol = eth_type_trans(skb, dev);
	skb_checksum_none_assert(skb);
	netif_rx(skb);
	sp->rx_buff[entry] = NULL;
}

static void ipg_nic_rx_with_start(struct net_device *dev,
				  struct ipg_nic_private *sp,
				  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct pci_dev *pdev = sp->pdev;
	struct sk_buff *skb;

	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	/* Begin a new jumbo frame, dropping any previous partial one. */
	if (jumbo->found_start)
		dev_kfree_skb_irq(jumbo->skb);

	pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	skb_put(skb, sp->rxfrag_size);

	jumbo->found_start = 1;
	jumbo->current_size = sp->rxfrag_size;
	jumbo->skb = skb;

	sp->rx_buff[entry] = NULL;
}

static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (!skb)
			return;

		if (jumbo->found_start) {
			int framelen, endframelen;

			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->current_size;
			if (framelen > sp->rxsupport_size)
				dev_kfree_skb_irq(jumbo->skb);
			else {
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				skb_checksum_none_assert(jumbo->skb);
				netif_rx(jumbo->skb);
			}
		}

		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;

		ipg_nic_rx_free_skb(dev);
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}

static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
				       struct ipg_nic_private *sp,
				       struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* A middle fragment: append it to the open jumbo frame. */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (skb) {
			if (jumbo->found_start) {
				jumbo->current_size += sp->rxfrag_size;
				if (jumbo->current_size <= sp->rxsupport_size) {
					memcpy(skb_put(jumbo->skb,
						       sp->rxfrag_size),
					       skb->data, sp->rxfrag_size);
				}
			}
			ipg_nic_rx_free_skb(dev);
		}
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}

static int ipg_nic_rx_jumbo(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx_jumbo\n");

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct ipg_rx *rxfd = sp->rxd + entry;

		if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
			break;

		switch (ipg_nic_rx_check_frame_type(dev)) {
		case FRAME_WITH_START_WITH_END:
			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_START:
			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_END:
			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
			break;
		case FRAME_NO_START_NO_END:
			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
			break;
		}
	}

	sp->rx_current = curr;

	if (i == IPG_MAXRFDPROCESS_COUNT) {
		/* There are more RFDs to process, however the
		 * allocated amount of RFD processing time has
		 * expired. Assert Interrupt Requested to make
		 * sure we come back to process the remaining RFDs.
		 */
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
	}

	ipg_nic_rxrestore(dev);

	return 0;
}

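/*
 * Non-jumbo receive path: every complete frame fits one RFD, so any
 * descriptor that does not carry RFDDONE, FRAMESTART and FRAMEEND at once
 * ends the normal loop, and the trailing loop below discards the pieces of
 * any frame that nevertheless spilled over several RFDs.
 */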
static int ipg_nic_rx(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	struct ipg_rx *rxfd;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

#define __RFS_MASK \
	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct sk_buff *skb = sp->rx_buff[entry];
		unsigned int framelen;

		rxfd = sp->rxd + entry;

		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
			break;

		/* Get received frame length. */
		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

		/* Check for jumbo frame arrival with too small
		 * RXFRAG_SIZE.
		 */
		if (framelen > sp->rxfrag_size) {
			IPG_DEBUG_MSG
			    ("RFS FrameLen > allocated fragment size.\n");

			framelen = sp->rxfrag_size;
		}

		if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
		       (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
			IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
			IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {

			IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
				      (unsigned long int) rxfd->rfs);

			/* Increment general receive error statistic. */
			sp->stats.rx_errors++;

			/* Increment detailed receive error statistics. */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
				sp->stats.rx_fifo_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
				IPG_DEBUG_MSG("RX runt occurred.\n");
				sp->stats.rx_length_errors++;
			}

			/* No action for oversized or FCS errors here;
			 * those are presumably tallied by the hardware
			 * statistic registers read in ipg_nic_get_stats().
			 */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
				IPG_DEBUG_MSG("RX alignment error occurred.\n");
				sp->stats.rx_frame_errors++;
			}

			/* Free the memory associated with the RX buffer
			 * since it is erroneous and we will not pass it
			 * to higher layer processes.
			 */
			if (skb) {
				__le64 info = rxfd->frag_info;

				pci_unmap_single(sp->pdev,
					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

				dev_kfree_skb_irq(skb);
			}
		} else {
			/* Adjust the new buffer length to accommodate the
			 * size of the received frame.
			 */
			skb_put(skb, framelen);

			/* Set the buffer's protocol field to Ethernet. */
			skb->protocol = eth_type_trans(skb, dev);

			/* The IPG encountered an error with (or there were
			 * no) IP/TCP/UDP checksums. This may or may not
			 * indicate an invalid IP/TCP/UDP frame was
			 * received. Let the upper layer decide.
			 */
			skb_checksum_none_assert(skb);

			/* Hand off frame for higher layer processing.
			 * The function netif_rx() releases the sk_buff
			 * when processing completes.
			 */
			netif_rx(skb);
		}

		/* Assure the RX buffer is not reused by the IPG. */
		sp->rx_buff[entry] = NULL;
	}

	/* If there are more RFDs to process and the allocated amount of
	 * RFD processing time has expired, assert Interrupt Requested to
	 * make sure we come back to process the remaining RFDs.
	 */
	if (i == IPG_MAXRFDPROCESS_COUNT)
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);

#ifdef IPG_DEBUG
	/* Check if the RFD list contained no receive frame data. */
	if (!i)
		sp->EmptyRFDListCount++;
#endif
	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;

		rxfd = sp->rxd + entry;

		IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");

		/* An unexpected event: additional code would be needed
		 * to handle it properly, so for the time being just
		 * disregard the frame and free its RX buffer.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[entry]);
		}

		/* Assure the RX buffer is not reused by the IPG. */
		sp->rx_buff[entry] = NULL;
	}

	sp->rx_current = curr;

	/* Check to see if there are a minimum number of used
	 * RFDs before restoring any (should improve performance.)
	 */
	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
		ipg_nic_rxrestore(dev);

	return 0;
}

static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	/* Acknowledge the HostError interrupt by resetting the
	 * IPG DMA and HOST units, then rebuild both descriptor lists.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	init_rfdlist(dev);
	init_tfdlist(dev);

	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
		       dev->name);
		schedule_delayed_work(&sp->task, HZ);
	}
}

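/*
 * Interrupt handler. Reading INT_STATUS_ACK both fetches and acknowledges
 * the pending interrupt sources and disables further interrupts; they are
 * re-enabled on the way out, so the handler holds sp->lock for its whole
 * body.
 */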
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int handled = 0;
	u16 status;

	IPG_DEBUG_MSG("_interrupt_handler\n");

	if (sp->is_jumbo)
		ipg_nic_rxrestore(dev);

	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge some
	 * interrupts if issued. All IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);

	/* Shared IRQ or remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not ready for use by the
		 * IPG). Try to restore all used RFDs.
		 */
		ipg_nic_rxrestore(dev);

#ifdef IPG_DEBUG
		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
#endif
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames.
	 */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
#ifdef IPG_DEBUG
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list.
		 */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
				IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
			      (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
			       IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
			       IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;
#endif

		if (sp->is_jumbo)
			ipg_nic_rx_jumbo(dev);
		else
			ipg_nic_rx(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics. */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			printk(KERN_INFO "%s: Auto-negotiation error.\n",
			       dev->name);
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt.\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt.\n");

out_enable:
	/* Re-enable IPG interrupts. */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}

static void ipg_rx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		if (sp->rx_buff[i]) {
			struct ipg_rx *rxfd = sp->rxd + i;

			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		}
	}
}

static void ipg_tx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		if (sp->tx_buff[i]) {
			struct ipg_tx *txfd = sp->txd + i;

			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				sp->tx_buff[i]->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(sp->tx_buff[i]);

			sp->tx_buff[i] = NULL;
		}
	}
}

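/*
 * Bring the interface up: claim the (possibly shared) IRQ, allocate the
 * coherent RX/TX descriptor rings, initialize both lists, program the
 * registers via ipg_io_config() and finally enable the MAC receiver and
 * transmitter.
 */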
static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = sp->rxsupport_size;

	/* Disable interrupts while the IRQ and the descriptor rings
	 * are being set up.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
		       dev->name);
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;

	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);

	/* Initialize the jumbo frame reassembly state. */
	sp->jumbo.found_start = 0;
	sp->jumbo.current_size = 0;
	sp->jumbo.skb = NULL;

	/* Enable transmit and receive operation of the IPG. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}

static int ipg_nic_stop(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;

	IPG_DEBUG_MSG("_nic_stop\n");

	netif_stop_queue(dev);

	IPG_DUMPTFDLIST(dev);

	do {
		(void) ipg_r16(INT_STATUS_ACK);

		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

		synchronize_irq(pdev->irq);
	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);

	ipg_rx_clear(sp);

	ipg_tx_clear(sp);

	/* Release the descriptor rings allocated in ipg_nic_open(). */
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);

	free_irq(pdev->irq, dev);

	return 0;
}

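/*
 * Queue one frame for transmission. The whole skb is mapped as a single
 * fragment; its DMA address goes in the low bits of frag_info and its
 * length in bits 48-63, while the TFC word carries the frame id, the
 * fragment count and the per-frame option bits. Clearing TFDDONE last
 * hands the descriptor to the hardware, which is then prodded with a
 * TX_DMA_POLL_NOW.
 */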
static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so no more
	 * transmit frames are accepted until the current frame
	 * completes.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->reset_current_tfd) {
		sp->reset_current_tfd = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->tx_buff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));

	/* In 10Mbps mode request a TxComplete interrupt for every
	 * frame; always request a TxDMAComplete indication.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);

	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to
	 * be inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* Map the frame data for DMA and record the bus address in the
	 * fragment info field.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The fragment length occupies bits 48-63 of frag_info. */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	sp->tx_current++;

	mmiowb();

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);

	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}

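/*
 * Walk the DefaultPhyParam records (see the format note above the table)
 * and write the (register, value) pairs of the record whose revision byte
 * matches the PCI revision of this chip.
 */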
static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				/* Each (register, value) pair is 4 bytes. */
				length -= 4;
			}
			break;
		} else {
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}

/* Issue an EEPROM read opcode and poll until the busy bit clears,
 * then return the data register (0 on timeout).
 */
static int read_eeprom(struct net_device *dev, int eep_addr)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int i;
	int ret = 0;
	u16 value;

	value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
	ipg_w16(value, EEPROM_CTRL);

	for (i = 0; i < 1000; i++) {
		u16 data;

		mdelay(10);
		data = ipg_r16(EEPROM_CTRL);
		if (!(data & IPG_EC_EEPROM_BUSY)) {
			ret = ipg_r16(EEPROM_DATA);
			break;
		}
	}
	return ret;
}

static void ipg_init_mii(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct mii_if_info *mii_if = &sp->mii_if;
	int phyaddr;

	mii_if->dev = dev;
	mii_if->mdio_read = mdio_read;
	mii_if->mdio_write = mdio_write;
	mii_if->phy_id_mask = 0x1f;
	mii_if->reg_num_mask = 0x1f;

	mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);

	if (phyaddr != 0x1f) {
		u16 mii_phyctrl, mii_1000cr;

		mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
		mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
			GMII_PHY_1000BASETCONTROL_PreferMaster;
		mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);

		mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);

		/* Set default PHY parameters for this chip revision. */
		ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);

		/* Reset the PHY and restart auto-negotiation. */
		mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
		mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
	}
}

static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	int rc;

	/* Read the LED mode configuration from the EEPROM. */
	sp->led_mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. */
	rc = ipg_reset(dev, IPG_RESET_MASK);
	if (rc < 0)
		goto out;

	ipg_init_mii(dev);

	/* Read the MAC address from the EEPROM. */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set the station address in the net_device structure. */
	dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
out:
	return rc;
}

static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

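/*
 * Change the MTU by tearing the interface down and bringing it back up
 * with recomputed buffer sizes. Frames above 0x0600 bytes use the jumbo
 * receive path; receive fragments appear to be limited to 4088 bytes, so
 * larger frames are reassembled from chained fragments.
 */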
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int err;

	IPG_DEBUG_MSG("_nic_change_mtu\n");

	/* The MTU must fall between the minimum Ethernet payload size
	 * and the largest frame the hardware supports.
	 */
	if (new_mtu < 68 || new_mtu > 10240)
		return -EINVAL;

	err = ipg_nic_stop(dev);
	if (err)
		return err;

	dev->mtu = new_mtu;

	sp->max_rxframe_size = new_mtu;

	sp->rxfrag_size = new_mtu;
	if (sp->rxfrag_size > 4088)
		sp->rxfrag_size = 4088;

	sp->rxsupport_size = sp->max_rxframe_size;

	if (new_mtu > 0x0600)
		sp->is_jumbo = true;
	else
		sp->is_jumbo = false;

	return ipg_nic_open(dev);
}

static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_gset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_sset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_nway_reset(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_nway_restart(&sp->mii_if);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static const struct ethtool_ops ipg_ethtool_ops = {
	.get_settings = ipg_get_settings,
	.set_settings = ipg_set_settings,
	.nway_reset   = ipg_nway_reset,
};

static void __devexit ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct net_device_ops ipg_netdev_ops = {
	.ndo_open		= ipg_nic_open,
	.ndo_stop		= ipg_nic_stop,
	.ndo_start_xmit		= ipg_nic_hard_start_xmit,
	.ndo_get_stats		= ipg_nic_get_stats,
	.ndo_set_multicast_list = ipg_nic_set_multicast_list,
	.ndo_do_ioctl		= ipg_ioctl,
	.ndo_tx_timeout		= ipg_tx_timeout,
	.ndo_change_mtu		= ipg_nic_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit ipg_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			printk(KERN_ERR "%s: DMA config failed.\n",
			       pci_name(pdev));
			goto err_disable_0;
		}
	}

	/* Initialize net device. */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	sp->is_jumbo = IPG_IS_JUMBO;
	sp->rxfrag_size = IPG_RXFRAG_SIZE;
	sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
	sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;

	/* Declare IPG NIC functions for Ethernet device methods. */
	dev->netdev_ops = &ipg_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
out:
	return rc;

err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}

static struct pci_driver ipg_pci_driver = {
	.name		= IPG_DRIVER_NAME,
	.id_table	= ipg_pci_tbl,
	.probe		= ipg_probe,
	.remove		= __devexit_p(ipg_remove),
};

static int __init ipg_init_module(void)
{
	return pci_register_driver(&ipg_pci_driver);
}

static void __exit ipg_exit_module(void)
{
	pci_unregister_driver(&ipg_pci_driver);
}

module_init(ipg_init_module);
module_exit(ipg_exit_module);