1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141#include <linux/module.h>
142#include <linux/moduleparam.h>
143#include <linux/kernel.h>
144#include <linux/types.h>
145#include <linux/slab.h>
146#include <linux/delay.h>
147#include <linux/init.h>
148#include <linux/pci.h>
149#include <linux/dma-mapping.h>
150#include <linux/netdevice.h>
151#include <linux/etherdevice.h>
152#include <linux/mii.h>
153#include <linux/if_vlan.h>
154#include <linux/skbuff.h>
155#include <linux/ethtool.h>
156#include <linux/string.h>
157#include <asm/unaligned.h>
158
159
/* Driver identification strings. */
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.5.23-k4"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
#define PFX DRV_NAME ": "

/* Watchdog runs every 2 seconds; NAPI poll budget is 16 packets. */
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module parameters (all read-only after load, perm 0). */
static int debug = 3;			/* default NETIF_MSG level */
static int eeprom_bad_csum_allow = 0;	/* proceed despite bad EEPROM csum */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
/* Conditional printk: emits only when the NETIF_MSG_<nlevel> bit is set
 * in nic->msg_enable.  Requires a 'nic' variable in the calling scope. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))
188
/* PCI match entry for an 8255x part.  The trailing driver_data value
 * ('ich') flags ICH-integrated variants that need special handling. */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* All supported PRO/100 device IDs. */
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
237
/* MAC variants.  Values correspond to the PCI revision ID (see
 * e100_get_defaults(), which assigns nic->pdev->revision directly). */
enum mac {
	mac_82557_D100_A = 0,
	mac_82557_D100_B = 1,
	mac_82557_D100_C = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M = 8,
	mac_82559_D101S = 9,
	mac_82550_D102 = 12,
	mac_82550_D102_C = 13,
	mac_82551_E = 14,
	mac_82551_F = 15,
	mac_82551_10 = 16,
	mac_unknown = 0xFF,
};

/* Known PHY identifiers; presumably MII PHYSID1/PHYSID2 combined
 * (read in e100_phy_init) -- NOTE(review): confirm bit packing. */
enum phy {
	phy_100a = 0x000003E0,
	phy_100c = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown = 0xFFFFFFFF,
};
265
266
/* Memory-mapped Control/Status Register layout of the 8255x.  Field
 * order and sizes mirror the hardware -- do not reorder or pad. */
struct csr {
	struct {
		u8 status;	/* SCB status (RU state, interrupt causes) */
		u8 stat_ack;	/* write-1-to-ack interrupt bits */
		u8 cmd_lo;	/* CU/RU command opcode */
		u8 cmd_hi;	/* interrupt mask / software irq */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;
	u32 port;		/* PORT register: reset/self-test commands */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged serial EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MII management (MDIO) interface */
	u32 rx_dma_count;
};
282
/* Receive Unit status bits in scb.status. */
enum scb_status {
	rus_ready = 0x10,
	rus_mask = 0x3C,
};

/* Software-tracked state of the Receive Unit. */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits; acknowledged by writing them to scb.stat_ack. */
enum scb_stat_ack {
	stat_ack_not_ours = 0x00,
	stat_ack_sw_gen = 0x04,
	stat_ack_rnr = 0x10,	/* receiver not ready */
	stat_ack_cu_idle = 0x20,
	stat_ack_frame_rx = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,	/* all-ones read: device gone */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* scb.cmd_hi values: interrupt masking and software-generated irq. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all = 0x01,
	irq_sw_gen = 0x02,
};

/* scb.cmd_lo opcodes: cuc_* drive the Command Unit, ruc_* the
 * Receive Unit. */
enum scb_cmd_lo {
	cuc_nop = 0x00,
	ruc_start = 0x01,
	ruc_load_base = 0x06,
	cuc_start = 0x10,
	cuc_resume = 0x20,
	cuc_dump_addr = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion markers the chip writes after a stats dump(+reset). */
enum cuc_dump {
	cuc_dump_complete = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Values written to the PORT register (see e100_hw_reset/self_test). */
enum port {
	software_reset = 0x0000,
	selftest = 0x0001,
	selective_reset = 0x0002,
};
334
/* Bit-bang lines of the serial EEPROM interface (eeprom_ctrl_lo). */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register fields (mdi_ctrl). */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes. */
enum eeprom_op {
	op_write = 0x05,
	op_read = 0x06,
	op_ewds = 0x10,	/* write disable */
	op_ewen = 0x13,	/* write enable */
};

/* Well-known EEPROM word offsets. */
enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_id = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
374
/* Command block completion bits (cb->status, little-endian on wire). */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok = 0x2000,
};

/* Command block opcodes and control flags (cb->command). */
enum cb_command {
	cb_nop = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual (MAC) address */
	cb_config = 0x0002,
	cb_multi = 0x0003,	/* multicast list */
	cb_tx = 0x0004,
	cb_ucode = 0x0005,	/* load microcode */
	cb_dump = 0x0006,
	cb_tx_sf = 0x0008,	/* tx flexible mode */
	cb_cid = 0x1f00,	/* CNA interrupt delay */
	cb_i = 0x2000,		/* interrupt on completion */
	cb_s = 0x4000,		/* suspend after this cb */
	cb_el = 0x8000,		/* end of list */
};

/* Receive Frame Descriptor -- hardware layout, little-endian fields
 * (filled via cpu_to_le16 elsewhere in this file). */
struct rfd {
	u16 status;
	u16 command;
	u32 link;		/* DMA address of next rfd */
	u32 rbd;
	u16 actual_size;	/* bytes actually received */
	u16 size;		/* buffer capacity */
};

/* Host-side receive ring node: skb + its mapped DMA address. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
409
/* The chip defines config bits in little-endian bit order; the X()
 * macro swaps declaration order on big-endian-bitfield hosts so each
 * byte lays out identically either way. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b) b,a
#else
#define X(a,b) a,b
#endif
/* Device configure command payload (cb_config).  One struct member per
 * config byte; values are set in e100_configure().  Do not reorder. */
struct config {
	u8 X(byte_count:6, pad0:2);
	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
	u8 adaptive_ifs;
	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
	u8 X(rx_dma_max_count:7, pad4:1);
	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
	u8 X(linear_priority:3, pad11:5);
	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
	u8 ip_addr_lo;
	u8 ip_addr_hi;
	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
	u8 fc_delay_lo;
	u8 fc_delay_hi;
	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
453
#define E100_MAX_MULTICAST_ADDRS 64
/* Multicast-list command payload: byte count followed by the packed
 * station addresses. */
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2];
};

/* Microcode image length in dwords. */
#define UCODE_SIZE 134
/* Command Block.  status/command/link plus the union are shared with
 * the hardware (little-endian); next/prev/dma_addr/skb are host-only
 * bookkeeping appended after the device-visible part. */
struct cb {
	u16 status;
	u16 command;
	u32 link;		/* DMA address of next cb */
	union {
		u8 iaaddr[ETH_ALEN];
		u32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;	/* tx start threshold */
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;		/* single inline buffer descriptor */
		} tcb;
		u32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
488
/* Loopback modes written into config->loopback. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled by the cuc_dump_* commands; layout is
 * dictated by the hardware. */
struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;	/* completion marker (cuc_dump_*_complete) */
};

/* DMA-coherent scratch memory shared with the device. */
struct mem {
	struct {
		u32 signature;	/* written by chip during self-test */
		u32 result;	/* 0 on pass */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Tunable ring-size range plus the currently selected count. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;	/* receive frame descriptors */
	struct param_range cbs;		/* command blocks */
};
524
/* Per-adapter driver state.  ____cacheline_aligned markers group
 * hot members by access pattern. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs ____cacheline_aligned;	/* receive ring */
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;		/* template for fresh RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;	/* command ring lock */
	spinlock_t cmd_lock;		/* SCB command register lock */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;			/* command block ring */
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	u16 tx_command;			/* precomputed tx cb->command */
	/* End: frequently used values */

	enum {
		ich = (1 << 0),			/* ICH-integrated variant */
		promiscuous = (1 << 1),
		multicast_all = (1 << 2),
		wol_magic = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;	/* LED blink for ethtool ident */
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-coherent shared area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;		/* EEPROM word count */
	u16 eeprom[256];	/* cached EEPROM contents */
	spinlock_t mdio_lock;
};
591
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through to the chip by performing
	 * a read; any CSR read will do, SCB status is convenient. */
	(void)ioread8(&nic->csr->scb.status);
}
598
/* Unmask device interrupts.  Serialized against other SCB register
 * access via cmd_lock; the flush ensures the write reaches the chip
 * before the lock is dropped. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
608
/* Mask all device interrupts (mirror image of e100_enable_irq). */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
618
/* Reset the adapter: selective reset first to put the chip into a
 * known state, then a full software reset.  The 20us settle time
 * between PORT writes is required by the hardware. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with selective reset */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset the chip */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Reset re-enables interrupts at the chip; mask them again */
	e100_disable_irq(nic);
}
633
/* Run the chip's built-in self-test.  The chip DMAs a signature and a
 * result word into nic->mem->selftest; we poll after a fixed delay.
 * Returns 0 on pass, -ETIMEDOUT on failure or no response.
 * NOTE(review): dma_addr is truncated to u32 -- assumes the shared
 * area was allocated in 32-bit DMA space; verify against probe. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Pre-set known values so we can detect whether the chip
	 * actually wrote anything back. */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Self-test re-enables interrupts at the chip; mask them */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
665
666static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
667{
668 u32 cmd_addr_data[3];
669 u8 ctrl;
670 int i, j;
671
672
673 cmd_addr_data[0] = op_ewen << (addr_len - 2);
674 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
675 cpu_to_le16(data);
676 cmd_addr_data[2] = op_ewds << (addr_len - 2);
677
678
679 for(j = 0; j < 3; j++) {
680
681
682 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
683 e100_write_flush(nic); udelay(4);
684
685 for(i = 31; i >= 0; i--) {
686 ctrl = (cmd_addr_data[j] & (1 << i)) ?
687 eecs | eedi : eecs;
688 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
689 e100_write_flush(nic); udelay(4);
690
691 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
692 e100_write_flush(nic); udelay(4);
693 }
694
695 msleep(10);
696
697
698 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
699 e100_write_flush(nic); udelay(4);
700 }
701};
702
703
704static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
705{
706 u32 cmd_addr_data;
707 u16 data = 0;
708 u8 ctrl;
709 int i;
710
711 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
712
713
714 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
715 e100_write_flush(nic); udelay(4);
716
717
718 for(i = 31; i >= 0; i--) {
719 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
720 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
721 e100_write_flush(nic); udelay(4);
722
723 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
724 e100_write_flush(nic); udelay(4);
725
726
727
728 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
729 if(!(ctrl & eedo) && i > 16) {
730 *addr_len -= (i - 16);
731 i = 17;
732 }
733
734 data = (data << 1) | (ctrl & eedo ? 1 : 0);
735 }
736
737
738 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
739 e100_write_flush(nic); udelay(4);
740
741 return le16_to_cpu(data);
742};
743
744
/* Load entire EEPROM image into driver cache (nic->eeprom) and verify
 * the checksum.  The sum of all words must equal 0xBABA.  Returns 0 on
 * success, -EAGAIN on a bad checksum unless eeprom_bad_csum_allow. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA.
	 * NOTE(review): cpu_to_le16/le16_to_cpu on arithmetic values is
	 * endian-suspect; correct on little-endian at least -- verify. */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
770
771
/* Write cached words [start, start+count) back to the EEPROM and then
 * recompute and store the checksum word so the image stays valid.
 * Returns 0 on success, -EINVAL if the range reaches the checksum word
 * or beyond. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
796
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue a command to the SCB: wait for the previous command to be
 * accepted (cmd_lo reads back zero), optionally load the general
 * pointer, then write the opcode.  Returns 0 or -EAGAIN on timeout.
 * Runs with interrupts disabled under cmd_lock. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Spin the first few iterations, then back off with udelay */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume reuses the chip's current pointer; everything else
	 * needs the operand address loaded first */
	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
829
/* Queue a command block: take the next free cb, let cb_prepare fill it
 * in, link it into the suspend-chained ring, and kick the Command Unit
 * for any cbs not yet handed to the hardware.
 * Returns 0, -ENOSPC when this cb consumed the last free slot, or
 * -ENOMEM when no cb was available at all. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* No more room; kick the recovery path */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
884
885static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
886{
887 u32 data_out = 0;
888 unsigned int i;
889 unsigned long flags;
890
891
892
893
894
895
896
897
898 spin_lock_irqsave(&nic->mdio_lock, flags);
899 for (i = 100; i; --i) {
900 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
901 break;
902 udelay(20);
903 }
904 if (unlikely(!i)) {
905 printk("e100.mdio_ctrl(%s) won't go Ready\n",
906 nic->netdev->name );
907 spin_unlock_irqrestore(&nic->mdio_lock, flags);
908 return 0;
909 }
910 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
911
912 for (i = 0; i < 100; i++) {
913 udelay(20);
914 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
915 break;
916 }
917 spin_unlock_irqrestore(&nic->mdio_lock, flags);
918 DPRINTK(HW, DEBUG,
919 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
920 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
921 return (u16)data_out;
922}
923
924static int mdio_read(struct net_device *netdev, int addr, int reg)
925{
926 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
927}
928
929static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
930{
931 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
932}
933
/* Initialize software defaults: ring-size parameters, MAC revision,
 * tx threshold/command templates, the blank RFD template, and the MII
 * callback structure. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is keyed off the PCI revision ID; ICH parts report
	 * as 82559 D101M */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	nic->blank_rfd.rbd = 0xFFFFFFFF;	/* no RBD (endian-invariant) */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
966
/* cb_prepare: fill in a cb_config command from driver state.  The
 * magic values follow the 8255x configure-command byte layout; later
 * assignments override the base defaults per MAC revision and mode. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* raw view for the debug dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Base configuration common to all parts */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=save */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Revision-specific tweaks for 82558 and later */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1044
1045
1046
1047
1048
1049
/* CPUSaver receive-bundling microcode for 82559 D101M.  The *_DWORD
 * offsets locate the tunable fields (interrupt delay timer, bundle
 * max, min frame size) that e100_setup_ucode() patches before load.
 * The image itself is opaque firmware -- never edit the hex. */
#define D101M_CPUSAVER_TIMER_DWORD 78
#define D101M_CPUSAVER_BUNDLE_DWORD 65
#define D101M_CPUSAVER_MIN_SIZE_DWORD 126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}
1090
1091
1092
1093
1094
1095
/* CPUSaver receive-bundling microcode for 82559 D101S.  Same patching
 * scheme as the D101M image; offsets differ per build.  Opaque
 * firmware -- never edit the hex. */
#define D101S_CPUSAVER_TIMER_DWORD 78
#define D101S_CPUSAVER_BUNDLE_DWORD 67
#define D101S_CPUSAVER_MIN_SIZE_DWORD 128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700  \
}
1136
1137
1138
1139
1140
1141
/* CPUSaver receive-bundling microcode for 82551 (D102 E-step); also
 * used for the 82551_10.  Same patching scheme as the other images.
 * Opaque firmware -- never edit the hex. */
#define D102_E_CPUSAVER_TIMER_DWORD 42
#define D102_E_CPUSAVER_BUNDLE_DWORD 54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}
1182
/* cb_prepare: build a cb_ucode command that loads the CPUSaver
 * receive-bundling microcode appropriate for this MAC revision.
 * If no image matches (or the part is an ICH variant, which takes no
 * ucode), a harmless cb_nop is issued instead so the caller's
 * wait-for-complete logic still works. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	/* Candidate images, keyed by MAC revision.  Note: static, and the
	 * patch loop below modifies the arrays in place -- the patched
	 * values persist across calls (idempotent with fixed tunables). */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;		/* dword holding the intr delay */
		u8 bundle_dword;	/* dword holding the bundle max */
		u8 min_size_dword;	/* dword holding the min frame size */
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}	/* sentinel: mac == 0 ends scan */
	}, *opts;

	/* CPUSaver tunables, patched into the low 16 bits of the dwords
	 * named above:
	 *   INTDELAY    - microseconds the chip may delay the rx
	 *                 interrupt while bundling frames
	 *   BUNDLEMAX   - max frames bundled per interrupt
	 *   BUNDLESMALL - if set, small frames (below the min-size
	 *                 dword threshold) are bundled too; otherwise
	 *                 they interrupt immediately
	 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536

	/* ICH parts take no microcode */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Patch the tunables into the image (low halfword only) */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
1304
1305static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1306 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1307{
1308 int err = 0, counter = 50;
1309 struct cb *cb = nic->cb_to_clean;
1310
1311 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1312 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
1313
1314
1315 nic->cuc_cmd = cuc_start;
1316
1317
1318 e100_write_flush(nic);
1319 udelay(10);
1320
1321
1322 while (!(cb->status & cpu_to_le16(cb_complete))) {
1323 msleep(10);
1324 if (!--counter) break;
1325 }
1326
1327
1328 iowrite8(~0, &nic->csr->scb.stat_ack);
1329
1330
1331 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1332 DPRINTK(PROBE,ERR, "ucode load failed\n");
1333 err = -EPERM;
1334 }
1335
1336 return err;
1337}
1338
1339static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1340 struct sk_buff *skb)
1341{
1342 cb->command = cpu_to_le16(cb_iaaddr);
1343 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1344}
1345
1346static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1347{
1348 cb->command = cpu_to_le16(cb_dump);
1349 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1350 offsetof(struct mem, dump_buf));
1351}
1352
1353#define NCONFIG_AUTO_SWITCH 0x0080
1354#define MII_NSC_CONG MII_RESV1
1355#define NSC_CONG_ENABLE 0x0100
1356#define NSC_CONG_TXREADY 0x0400
1357#define ADVERTISE_FC_SUPPORTED 0x0400
/* Discover and initialize the PHY: find its MDIO address, isolate all
 * other PHYs, read its ID, and apply model-specific workarounds.
 * Returns 0 on success or -EAGAIN if no PHY responds. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,...,31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR is read twice because its link-status bit is latched */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones or all-zeros means nothing is at this address */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Isolate all the PHY ids, except the one we found */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys: disable congestion control */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* advertise Tx-ready, clear the congestion-enable bit */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable MDI/MDI-X auto-switching unless media is forced */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1414
/* Bring the adapter hardware fully up: reset, self-test, PHY init,
 * microcode load, configuration, station address, and statistics
 * setup.  The command order below is significant.  Returns 0 or the
 * negative errno from the first failing step. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* self-test sleeps, so skip it when called from atomic context */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* load CU and RU base addresses (0 = use linear addressing) */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* microcode load must complete before configuration proceeds */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the hardware statistics dump at nic->mem->stats */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1447
1448static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1449{
1450 struct net_device *netdev = nic->netdev;
1451 struct dev_mc_list *list = netdev->mc_list;
1452 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1453
1454 cb->command = cpu_to_le16(cb_multi);
1455 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1456 for(i = 0; list && i < count; i++, list = list->next)
1457 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1458 ETH_ALEN);
1459}
1460
1461static void e100_set_multicast_list(struct net_device *netdev)
1462{
1463 struct nic *nic = netdev_priv(netdev);
1464
1465 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1466 netdev->mc_count, netdev->flags);
1467
1468 if(netdev->flags & IFF_PROMISC)
1469 nic->flags |= promiscuous;
1470 else
1471 nic->flags &= ~promiscuous;
1472
1473 if(netdev->flags & IFF_ALLMULTI ||
1474 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1475 nic->flags |= multicast_all;
1476 else
1477 nic->flags &= ~multicast_all;
1478
1479 e100_exec_cb(nic, NULL, e100_configure);
1480 e100_exec_cb(nic, NULL, e100_multi);
1481}
1482
/* Harvest the hardware statistics dump area into netdev and driver
 * counters, then request the next dump+reset cycle. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Newer MACs dump more counters, so the "dump complete" marker
	 * lands at a different offset depending on the MAC generation. */
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command issued from the watchdog. */
	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control counters exist only on 82558 A4 and later */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters exist only on 82559 D101M and later */
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Ask the device to dump and reset its counters again */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1541
1542static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1543{
1544
1545
1546
1547 if(duplex == DUPLEX_HALF) {
1548 u32 prev = nic->adaptive_ifs;
1549 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1550
1551 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1552 (nic->tx_frames > min_frames)) {
1553 if(nic->adaptive_ifs < 60)
1554 nic->adaptive_ifs += 5;
1555 } else if (nic->tx_frames < min_frames) {
1556 if(nic->adaptive_ifs >= 5)
1557 nic->adaptive_ifs -= 5;
1558 }
1559 if(nic->adaptive_ifs != prev)
1560 e100_exec_cb(nic, NULL, e100_configure);
1561 }
1562}
1563
/* Periodic (2s) timer: track link state, trigger stats collection, run
 * adaptive IFS, and refresh multicast/workaround state. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link checking */
	mii_ethtool_gset(&nic->mii, &cmd);

	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Generate a software interrupt (to recover from rare Rx allocation
	 * failures).  The spinlock is needed because the interrupt-mask bit
	 * and the SW-interrupt-generation bit share cmd_hi, so an unlocked
	 * read-modify-write could re-enable interrupts by accident. */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to work around a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* SW workaround needed for ICH[x] 10Mbps/half-duplex Tx hang */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1611
1612static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1613 struct sk_buff *skb)
1614{
1615 cb->command = nic->tx_command;
1616
1617 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1618 cb->command |= cpu_to_le16(cb_i);
1619 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1620 cb->u.tcb.tcb_byte_count = 0;
1621 cb->u.tcb.threshold = nic->tx_threshold;
1622 cb->u.tcb.tbd_count = 1;
1623 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1624 skb->data, skb->len, PCI_DMA_TODEVICE));
1625
1626 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1627}
1628
1629static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1630{
1631 struct nic *nic = netdev_priv(netdev);
1632 int err;
1633
1634 if(nic->flags & ich_10h_workaround) {
1635
1636
1637
1638 if(e100_exec_cmd(nic, cuc_nop, 0))
1639 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1640 udelay(1);
1641 }
1642
1643 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1644
1645 switch(err) {
1646 case -ENOSPC:
1647
1648 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1649 netif_stop_queue(netdev);
1650 break;
1651 case -ENOMEM:
1652
1653 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1654 netif_stop_queue(netdev);
1655 return 1;
1656 }
1657
1658 netdev->trans_start = jiffies;
1659 return 0;
1660}
1661
/* Reclaim completed Tx control blocks: account stats, unmap and free
 * the transmitted skbs, and wake the queue if space was freed.  Runs
 * from NAPI poll context, serialized against the xmit path by cb_lock.
 * Returns nonzero if anything was cleaned. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete, advancing cb_to_clean as we go */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* Free the original skb (non-Tx CBs have no skb attached) */
		if(likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1702
1703static void e100_clean_cbs(struct nic *nic)
1704{
1705 if(nic->cbs) {
1706 while(nic->cbs_avail != nic->params.cbs.count) {
1707 struct cb *cb = nic->cb_to_clean;
1708 if(cb->skb) {
1709 pci_unmap_single(nic->pdev,
1710 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1711 le16_to_cpu(cb->u.tcb.tbd.size),
1712 PCI_DMA_TODEVICE);
1713 dev_kfree_skb(cb->skb);
1714 }
1715 nic->cb_to_clean = nic->cb_to_clean->next;
1716 nic->cbs_avail++;
1717 }
1718 pci_free_consistent(nic->pdev,
1719 sizeof(struct cb) * nic->params.cbs.count,
1720 nic->cbs, nic->cbs_dma_addr);
1721 nic->cbs = NULL;
1722 nic->cbs_avail = 0;
1723 }
1724 nic->cuc_cmd = cuc_start;
1725 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1726 nic->cbs;
1727}
1728
1729static int e100_alloc_cbs(struct nic *nic)
1730{
1731 struct cb *cb;
1732 unsigned int i, count = nic->params.cbs.count;
1733
1734 nic->cuc_cmd = cuc_start;
1735 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1736 nic->cbs_avail = 0;
1737
1738 nic->cbs = pci_alloc_consistent(nic->pdev,
1739 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1740 if(!nic->cbs)
1741 return -ENOMEM;
1742
1743 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1744 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1745 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1746
1747 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1748 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1749 ((i+1) % count) * sizeof(struct cb));
1750 cb->skb = NULL;
1751 }
1752
1753 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1754 nic->cbs_avail = count;
1755
1756 return 0;
1757}
1758
1759static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1760{
1761 if(!nic->rxs) return;
1762 if(RU_SUSPENDED != nic->ru_running) return;
1763
1764
1765 if(!rx) rx = nic->rxs;
1766
1767
1768 if(rx->skb) {
1769 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1770 nic->ru_running = RU_RUNNING;
1771 }
1772}
1773
1774#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate and DMA-map one receive buffer, seed its RFD header, and
 * splice it onto the end of the RFA by linking the previous RFD to it.
 * Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align the IP header, init the RFD from the blank template, map */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to the end of the RFA by pointing the previous
	 * RFD at this one, then clearing the previous RFD's EL bit --
	 * the wmb() ensures hardware never sees EL cleared before the
	 * new link is in place. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
1807
/* Indicate one received frame to the stack if its RFD is complete.
 * Returns 0 on success, -EAGAIN when the NAPI work budget is exhausted,
 * or -ENODATA when the next RFD has not completed yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size (low 14 bits), clamped to the buffer */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Unmap: the CPU owns the buffer from here on */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* If the RU stopped at this (end-of-list) RFD, remember to restart */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Strip the RFD header and hand the payload up */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate frames the hardware flagged as bad */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	/* The skb now belongs to the stack (or is freed); drop our ref */
	rx->skb = NULL;

	return 0;
}
1868
/* Indicate completed frames and refill the RFD ring; restart the RU if
 * it was found suspended and cleanup ran to completion. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* Already RNR (suspended)?  Note it now, so the restart only
	 * happens after a full clean pass -- this keeps the hardware and
	 * rx_to_clean from racing when in NAPI mode. */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* Hit quota, so there is more work to do; defer
			 * the restart until cleanup completes next poll. */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more frames to clean */
	}

	/* Save our starting point to restart the receiver from */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	/* Alloc new skbs to refill the list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next round */
	}

	if(restart_required) {
		/* Ack the RNR condition, then restart the receive unit */
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}
1913
1914static void e100_rx_clean_list(struct nic *nic)
1915{
1916 struct rx *rx;
1917 unsigned int i, count = nic->params.rfds.count;
1918
1919 nic->ru_running = RU_UNINITIALIZED;
1920
1921 if(nic->rxs) {
1922 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1923 if(rx->skb) {
1924 pci_unmap_single(nic->pdev, rx->dma_addr,
1925 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1926 dev_kfree_skb(rx->skb);
1927 }
1928 }
1929 kfree(nic->rxs);
1930 nic->rxs = NULL;
1931 }
1932
1933 nic->rx_to_use = nic->rx_to_clean = NULL;
1934}
1935
1936static int e100_rx_alloc_list(struct nic *nic)
1937{
1938 struct rx *rx;
1939 unsigned int i, count = nic->params.rfds.count;
1940
1941 nic->rx_to_use = nic->rx_to_clean = NULL;
1942 nic->ru_running = RU_UNINITIALIZED;
1943
1944 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1945 return -ENOMEM;
1946
1947 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1948 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1949 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1950 if(e100_rx_alloc_skb(nic, rx)) {
1951 e100_rx_clean_list(nic);
1952 return -ENOMEM;
1953 }
1954 }
1955
1956 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1957 nic->ru_running = RU_SUSPENDED;
1958
1959 return 0;
1960}
1961
/* Interrupt handler: ack pending events, record an RNR condition so the
 * poll path restarts the RU, and hand processing off to NAPI with chip
 * interrupts disabled. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	/* Not our interrupt, or the hardware is gone */
	if(stat_ack == stat_ack_not_ours ||
	   stat_ack == stat_ack_not_present)
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* Mask interrupts and let NAPI do the work */
	if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev, &nic->napi);
	}

	return IRQ_HANDLED;
}
1988
/* NAPI poll: clean the rx ring within @budget and reclaim tx; when the
 * budget wasn't exhausted, exit polling and re-enable interrupts. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	struct net_device *netdev = nic->netdev;
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
2006
2007#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  Runs the normal interrupt
 * path by hand with chip interrupts masked. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
2017#endif
2018
2019static int e100_set_mac_address(struct net_device *netdev, void *p)
2020{
2021 struct nic *nic = netdev_priv(netdev);
2022 struct sockaddr *addr = p;
2023
2024 if (!is_valid_ether_addr(addr->sa_data))
2025 return -EADDRNOTAVAIL;
2026
2027 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2028 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2029
2030 return 0;
2031}
2032
2033static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2034{
2035 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2036 return -EINVAL;
2037 netdev->mtu = new_mtu;
2038 return 0;
2039}
2040
2041static int e100_asf(struct nic *nic)
2042{
2043
2044 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2045 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2046 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2047 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2048}
2049
/* Bring the interface up: allocate rx/tx rings, init hardware, start
 * the receiver, watchdog and irq.  Unwinds in reverse order on
 * failure via the goto chain.  Returns 0 or a negative errno. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2081
/* Take the interface down.  Order matters: quiesce NAPI and the Tx
 * queue first, reset the hardware, then release irq/timer/rings. */
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2094
/* net_device tx_timeout hook: defer the recovery to process context. */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2103
2104static void e100_tx_timeout_task(struct work_struct *work)
2105{
2106 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2107 struct net_device *netdev = nic->netdev;
2108
2109 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2110 ioread8(&nic->csr->scb.status));
2111 e100_down(netdev_priv(netdev));
2112 e100_up(netdev_priv(netdev));
2113}
2114
/* Run a MAC or PHY loopback self-test: transmit one all-0xFF frame in
 * loopback mode and compare the received copy byte-for-byte.  Returns
 * 0 on pass, -EAGAIN on data mismatch, or another negative errno on
 * setup failure.  Caller must have taken the interface down. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Set up rx/tx rings fresh for the test */
	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* compare the payload past the RFD header with what was sent */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2170
2171#define MII_LED_CONTROL 0x1B
2172static void e100_blink_led(unsigned long data)
2173{
2174 struct nic *nic = (struct nic *)data;
2175 enum led_state {
2176 led_on = 0x01,
2177 led_off = 0x04,
2178 led_on_559 = 0x05,
2179 led_on_557 = 0x07,
2180 };
2181
2182 nic->leds = (nic->leds & led_on) ? led_off :
2183 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2184 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2185 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2186}
2187
2188static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2189{
2190 struct nic *nic = netdev_priv(netdev);
2191 return mii_ethtool_gset(&nic->mii, cmd);
2192}
2193
/* ethtool set_settings: reset the PHY, apply the new link settings via
 * the MII layer, then reconfigure the MAC to match. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	/* NOTE(review): the e100_configure result is not checked here --
	 * confirm that is intentional. */
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
2205
2206static void e100_get_drvinfo(struct net_device *netdev,
2207 struct ethtool_drvinfo *info)
2208{
2209 struct nic *nic = netdev_priv(netdev);
2210 strcpy(info->driver, DRV_NAME);
2211 strcpy(info->version, DRV_VERSION);
2212 strcpy(info->fw_version, "N/A");
2213 strcpy(info->bus_info, pci_name(nic->pdev));
2214}
2215
2216#define E100_PHY_REGS 0x1C
2217static int e100_get_regs_len(struct net_device *netdev)
2218{
2219 struct nic *nic = netdev_priv(netdev);
2220 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2221}
2222
/* ethtool get_regs: dump SCB state, PHY registers 0..E100_PHY_REGS
 * (stored high-to-low), and the hardware register dump buffer. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	/* word 0: SCB cmd_hi / cmd_lo / status packed together */
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	/* trigger a hardware register dump and give it time to finish */
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
2243
2244static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2245{
2246 struct nic *nic = netdev_priv(netdev);
2247 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2248 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2249}
2250
2251static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2252{
2253 struct nic *nic = netdev_priv(netdev);
2254
2255 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2256 return -EOPNOTSUPP;
2257
2258 if(wol->wolopts)
2259 nic->flags |= wol_magic;
2260 else
2261 nic->flags &= ~wol_magic;
2262
2263 e100_exec_cb(nic, NULL, e100_configure);
2264
2265 return 0;
2266}
2267
2268static u32 e100_get_msglevel(struct net_device *netdev)
2269{
2270 struct nic *nic = netdev_priv(netdev);
2271 return nic->msg_enable;
2272}
2273
2274static void e100_set_msglevel(struct net_device *netdev, u32 value)
2275{
2276 struct nic *nic = netdev_priv(netdev);
2277 nic->msg_enable = value;
2278}
2279
2280static int e100_nway_reset(struct net_device *netdev)
2281{
2282 struct nic *nic = netdev_priv(netdev);
2283 return mii_nway_restart(&nic->mii);
2284}
2285
2286static u32 e100_get_link(struct net_device *netdev)
2287{
2288 struct nic *nic = netdev_priv(netdev);
2289 return mii_link_ok(&nic->mii);
2290}
2291
2292static int e100_get_eeprom_len(struct net_device *netdev)
2293{
2294 struct nic *nic = netdev_priv(netdev);
2295 return nic->eeprom_wc << 1;
2296}
2297
2298#define E100_EEPROM_MAGIC 0x1234
2299static int e100_get_eeprom(struct net_device *netdev,
2300 struct ethtool_eeprom *eeprom, u8 *bytes)
2301{
2302 struct nic *nic = netdev_priv(netdev);
2303
2304 eeprom->magic = E100_EEPROM_MAGIC;
2305 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2306
2307 return 0;
2308}
2309
2310static int e100_set_eeprom(struct net_device *netdev,
2311 struct ethtool_eeprom *eeprom, u8 *bytes)
2312{
2313 struct nic *nic = netdev_priv(netdev);
2314
2315 if(eeprom->magic != E100_EEPROM_MAGIC)
2316 return -EINVAL;
2317
2318 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2319
2320 return e100_eeprom_save(nic, eeprom->offset >> 1,
2321 (eeprom->len >> 1) + 1);
2322}
2323
2324static void e100_get_ringparam(struct net_device *netdev,
2325 struct ethtool_ringparam *ring)
2326{
2327 struct nic *nic = netdev_priv(netdev);
2328 struct param_range *rfds = &nic->params.rfds;
2329 struct param_range *cbs = &nic->params.cbs;
2330
2331 ring->rx_max_pending = rfds->max;
2332 ring->tx_max_pending = cbs->max;
2333 ring->rx_mini_max_pending = 0;
2334 ring->rx_jumbo_max_pending = 0;
2335 ring->rx_pending = rfds->count;
2336 ring->tx_pending = cbs->count;
2337 ring->rx_mini_pending = 0;
2338 ring->rx_jumbo_pending = 0;
2339}
2340
/* ethtool set_ringparam: clamp the requested rx/tx ring sizes to the
 * driver's min/max and re-up the interface to apply them. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	/* mini/jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* take the interface down so rings can be rebuilt */
	if(netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}
2364
/* Labels for the ethtool self-test results; order matches the data[]
 * slots filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
2372#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2373
/* ethtool self_test: run the link and eeprom checks always, and the
 * destructive self/loopback tests only when offline testing was
 * requested.  A nonzero data[i] marks test i as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* give the link time to settle after the tests */
	msleep_interruptible(4 * 1000);
}
2406
2407static int e100_phys_id(struct net_device *netdev, u32 data)
2408{
2409 struct nic *nic = netdev_priv(netdev);
2410
2411 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2412 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2413 mod_timer(&nic->blink_timer, jiffies);
2414 msleep_interruptible(data * 1000);
2415 del_timer_sync(&nic->blink_timer);
2416 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2417
2418 return 0;
2419}
2420
/* Labels for ethtool statistics: the first E100_NET_STATS_LEN entries
 * mirror net_device_stats; the rest are driver-private counters in the
 * order e100_get_ethtool_stats() emits them. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
2433#define E100_NET_STATS_LEN 21
2434#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2435
2436static int e100_get_sset_count(struct net_device *netdev, int sset)
2437{
2438 switch (sset) {
2439 case ETH_SS_TEST:
2440 return E100_TEST_LEN;
2441 case ETH_SS_STATS:
2442 return E100_STATS_LEN;
2443 default:
2444 return -EOPNOTSUPP;
2445 }
2446}
2447
2448static void e100_get_ethtool_stats(struct net_device *netdev,
2449 struct ethtool_stats *stats, u64 *data)
2450{
2451 struct nic *nic = netdev_priv(netdev);
2452 int i;
2453
2454 for(i = 0; i < E100_NET_STATS_LEN; i++)
2455 data[i] = ((unsigned long *)&netdev->stats)[i];
2456
2457 data[i++] = nic->tx_deferred;
2458 data[i++] = nic->tx_single_collisions;
2459 data[i++] = nic->tx_multiple_collisions;
2460 data[i++] = nic->tx_fc_pause;
2461 data[i++] = nic->rx_fc_pause;
2462 data[i++] = nic->rx_fc_unsupported;
2463 data[i++] = nic->tx_tco_frames;
2464 data[i++] = nic->rx_tco_frames;
2465}
2466
2467static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2468{
2469 switch(stringset) {
2470 case ETH_SS_TEST:
2471 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2472 break;
2473 case ETH_SS_STATS:
2474 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2475 break;
2476 }
2477}
2478
/* ethtool operation table hooked up via SET_ETHTOOL_OPS() in e100_probe(). */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
2502
2503static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2504{
2505 struct nic *nic = netdev_priv(netdev);
2506
2507 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2508}
2509
2510static int e100_alloc(struct nic *nic)
2511{
2512 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2513 &nic->dma_addr);
2514 return nic->mem ? 0 : -ENOMEM;
2515}
2516
2517static void e100_free(struct nic *nic)
2518{
2519 if(nic->mem) {
2520 pci_free_consistent(nic->pdev, sizeof(struct mem),
2521 nic->mem, nic->dma_addr);
2522 nic->mem = NULL;
2523 }
2524}
2525
2526static int e100_open(struct net_device *netdev)
2527{
2528 struct nic *nic = netdev_priv(netdev);
2529 int err = 0;
2530
2531 netif_carrier_off(netdev);
2532 if((err = e100_up(nic)))
2533 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2534 return err;
2535}
2536
/* netdev stop callback: tear the interface down; never fails. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2542
/*
 * PCI probe: allocate the netdev, enable and map the PCI device, load
 * defaults and the EEPROM-resident MAC address, then register the net
 * device.  Returns 0 on success or a negative errno, unwinding all
 * partially-acquired resources via the goto cleanup chain.
 */
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;
	DECLARE_MAC_BUF(mac);

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		/* nic isn't allocated yet, so DPRINTK can't be used here;
		 * emulate its level test with the module parameter. */
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	/* Hook up net_device callbacks. */
	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	/* Temporary name so early DPRINTKs identify the device; replaced
	 * with "eth%d" just before register_netdev() below. */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	/* BAR 0 must be memory-mapped; BAR 1 is the optional I/O window. */
	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	/* The hardware only does 32-bit DMA addressing. */
	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		DPRINTK(PROBE, INFO, "using i/o access mode\n");

	/* BAR 1 (I/O) when forced via module parameter, else BAR 0 (MMIO). */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data in the ID table marks ICH chipset variants. */
	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* Locks must be initialized before the first hardware access. */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before enabling bus mastering so it cannot DMA
	 * with stale state. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* The MAC address lives at the start of the EEPROM image. */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			DPRINTK(PROBE, ERR, "Invalid MAC address from "
				"EEPROM, aborting.\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			/* Module parameter overrides the sanity check; the
			 * user must assign a MAC before use. */
			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
				"you MUST configure one.\n");
		}
	}

	/* Wake-on-LAN (magic packet) support advertised via EEPROM, only
	 * present on 82558 D101-A4 and later MACs. */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* Ack any wake event left pending from before the driver loaded. */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		pdev->irq, print_mac(mac, netdev->dev_addr));

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
2700
2701static void __devexit e100_remove(struct pci_dev *pdev)
2702{
2703 struct net_device *netdev = pci_get_drvdata(pdev);
2704
2705 if(netdev) {
2706 struct nic *nic = netdev_priv(netdev);
2707 unregister_netdev(netdev);
2708 e100_free(nic);
2709 iounmap(nic->csr);
2710 free_netdev(netdev);
2711 pci_release_regions(pdev);
2712 pci_disable_device(pdev);
2713 pci_set_drvdata(pdev, NULL);
2714 }
2715}
2716
2717#ifdef CONFIG_PM
/* PM suspend: quiesce the interface, arm or disarm PCI wake events
 * depending on WoL/ASF configuration, and drop to D3hot.  Mirrors
 * e100_shutdown() below -- keep the two in sync. */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		napi_disable(&nic->napi);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* Enable D3hot/D3cold wake when magic-packet WoL is configured or
	 * ASF management needs the device alive.  (bitwise | is
	 * intentional here: both flag and e100_asf() are 0/non-0.) */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	free_irq(pdev->irq, netdev);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
2746
/* PM resume: restore power state and config space, clear wake events,
 * and re-run the full up sequence if the interface was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Ack any pending wake events and disable PME. */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2763#endif
2764
/* Shutdown hook (reboot/poweroff): nearly identical to e100_suspend()
 * but without pci_save_state() or netif_device_detach(), since the
 * system is going away rather than coming back. */
static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		napi_disable(&nic->napi);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	/* Same WoL/ASF wake arming as e100_suspend(). */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	free_irq(pdev->irq, netdev);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
2788
2789
2790
2791
2792
2793
2794
/* PCI error-recovery callback: the PCI core reports a channel error.
 * Quiesce the device without touching the (possibly dead) hardware and
 * ask for a slot reset. */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into a state similar to hotplug unplug.
	 * NOTE(review): napi_enable() right after stop looks odd --
	 * presumably it re-arms NAPI (stop leaves it disabled) so the
	 * later e100_open() in e100_io_resume() succeeds; confirm against
	 * e100_up()/e100_down(). */
	napi_enable(&nic->napi);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2811
2812
2813
2814
2815
2816
2817
/* PCI error-recovery callback: the slot has been reset.  Re-enable the
 * device and, on function 0 only, re-initialize the hardware and PHY. */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one function per card may perform the hardware reset. */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
2837
2838
2839
2840
2841
2842
2843
2844
/* PCI error-recovery callback: traffic may flow again.  Reattach the
 * netif and restart the interface if it was running before the error. */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Ack any pending wake events and disable PME. */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		/* Restart the link watchdog that error handling stopped. */
		mod_timer(&nic->watchdog, jiffies);
	}
}
2859
/* PCI error-recovery dispatch table, referenced from e100_driver below. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
2865
/* PCI driver registration block tying probe/remove/PM/error handling
 * to the device IDs in e100_id_table. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power management hooks are only built when CONFIG_PM is set. */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
2879
2880static int __init e100_init_module(void)
2881{
2882 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2883 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2884 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2885 }
2886 return pci_register_driver(&e100_driver);
2887}
2888
2889static void __exit e100_cleanup_module(void)
2890{
2891 pci_unregister_driver(&e100_driver);
2892}
2893
/* Register the module's load/unload entry points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);
2896